diff --git a/.bingo/Variables.mk b/.bingo/Variables.mk
index 8c08ceade..e48f105be 100644
--- a/.bingo/Variables.mk
+++ b/.bingo/Variables.mk
@@ -23,11 +23,17 @@ $(BINGO): $(BINGO_DIR)/bingo.mod
 	@echo "(re)installing $(GOBIN)/bingo-v0.9.0"
 	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.9.0 "github.com/bwplotka/bingo"
 
-CONTROLLER_GEN := $(GOBIN)/controller-gen-v0.19.0
+CONFTEST := $(GOBIN)/conftest-v0.62.0
+$(CONFTEST): $(BINGO_DIR)/conftest.mod
+	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
+	@echo "(re)installing $(GOBIN)/conftest-v0.62.0"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=conftest.mod -o=$(GOBIN)/conftest-v0.62.0 "github.com/open-policy-agent/conftest"
+
+CONTROLLER_GEN := $(GOBIN)/controller-gen-v0.20.0
 $(CONTROLLER_GEN): $(BINGO_DIR)/controller-gen.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
-	@echo "(re)installing $(GOBIN)/controller-gen-v0.19.0"
-	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=controller-gen.mod -o=$(GOBIN)/controller-gen-v0.19.0 "sigs.k8s.io/controller-tools/cmd/controller-gen"
+	@echo "(re)installing $(GOBIN)/controller-gen-v0.20.0"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=controller-gen.mod -o=$(GOBIN)/controller-gen-v0.20.0 "sigs.k8s.io/controller-tools/cmd/controller-gen"
 
 CRD_DIFF := $(GOBIN)/crd-diff-v0.5.0
 $(CRD_DIFF): $(BINGO_DIR)/crd-diff.mod
@@ -47,17 +53,17 @@ $(GOJQ): $(BINGO_DIR)/gojq.mod
 	@echo "(re)installing $(GOBIN)/gojq-v0.12.17"
 	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=gojq.mod -o=$(GOBIN)/gojq-v0.12.17 "github.com/itchyny/gojq/cmd/gojq"
 
-GOLANGCI_LINT := $(GOBIN)/golangci-lint-v2.6.2
+GOLANGCI_LINT := $(GOBIN)/golangci-lint-v2.7.2
 $(GOLANGCI_LINT): $(BINGO_DIR)/golangci-lint.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
-	@echo "(re)installing $(GOBIN)/golangci-lint-v2.6.2"
-	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v2.6.2 "github.com/golangci/golangci-lint/v2/cmd/golangci-lint"
+	@echo "(re)installing $(GOBIN)/golangci-lint-v2.7.2"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=golangci-lint.mod -o=$(GOBIN)/golangci-lint-v2.7.2 "github.com/golangci/golangci-lint/v2/cmd/golangci-lint"
 
-GORELEASER := $(GOBIN)/goreleaser-v1.26.2
+GORELEASER := $(GOBIN)/goreleaser-v2.11.2
 $(GORELEASER): $(BINGO_DIR)/goreleaser.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
-	@echo "(re)installing $(GOBIN)/goreleaser-v1.26.2"
-	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=goreleaser.mod -o=$(GOBIN)/goreleaser-v1.26.2 "github.com/goreleaser/goreleaser"
+	@echo "(re)installing $(GOBIN)/goreleaser-v2.11.2"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=goreleaser.mod -o=$(GOBIN)/goreleaser-v2.11.2 "github.com/goreleaser/goreleaser/v2"
 
 HELM := $(GOBIN)/helm-v3.18.4
 $(HELM): $(BINGO_DIR)/helm.mod
@@ -65,11 +71,17 @@ $(HELM): $(BINGO_DIR)/helm.mod
 	@echo "(re)installing $(GOBIN)/helm-v3.18.4"
 	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=helm.mod -o=$(GOBIN)/helm-v3.18.4 "helm.sh/helm/v3/cmd/helm"
 
-KIND := $(GOBIN)/kind-v0.30.0
+KIND := $(GOBIN)/kind-v0.31.0
 $(KIND): $(BINGO_DIR)/kind.mod
 	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
-	@echo "(re)installing $(GOBIN)/kind-v0.30.0"
-	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kind.mod -o=$(GOBIN)/kind-v0.30.0 "sigs.k8s.io/kind"
+	@echo "(re)installing $(GOBIN)/kind-v0.31.0"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kind.mod -o=$(GOBIN)/kind-v0.31.0 "sigs.k8s.io/kind"
+
+KUBE_SCORE := $(GOBIN)/kube-score-v1.20.0
+$(KUBE_SCORE): $(BINGO_DIR)/kube-score.mod
+	@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
+	@echo "(re)installing $(GOBIN)/kube-score-v1.20.0"
+	@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=kube-score.mod -o=$(GOBIN)/kube-score-v1.20.0 "github.com/zegl/kube-score/cmd/kube-score"
 
 KUSTOMIZE := $(GOBIN)/kustomize-v5.7.1
 $(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod
diff --git a/.bingo/conftest.mod b/.bingo/conftest.mod
new file mode 100644
index 000000000..294b93132
--- /dev/null
+++ b/.bingo/conftest.mod
@@ -0,0 +1,5 @@
+module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
+
+go 1.24.6
+
+require github.com/open-policy-agent/conftest v0.62.0
diff --git a/.bingo/conftest.sum b/.bingo/conftest.sum
new file mode 100644
index 000000000..b34a3b44b
--- /dev/null
+++ b/.bingo/conftest.sum
@@ -0,0 +1,2041 @@
+cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI=
+cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
+cloud.google.com/go v0.110.0/go.mod
h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= +cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= +cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= 
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod 
h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod 
h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod 
h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod 
h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod 
h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect 
v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= +cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= +cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= +cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= 
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= 
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= +cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod 
h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= 
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.6.0/go.mod 
h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storage v1.38.0 h1:Az68ZRGlnNTpIBbLjSMIV2BDcwwXYlRlQzis0llkpJg=
+cloud.google.com/go/storage v1.38.0/go.mod h1:tlUADB0mAb9BgYls9lq+8MGkfzOXuLrnHXlpHmvFJoY=
+cloud.google.com/go/storage v1.49.0 h1:zenOPBOWHCnojRd9aJZAyQXBYqkJkdQS42dxL55CIMw=
+cloud.google.com/go/storage v1.49.0/go.mod h1:k1eHhhpLvrPjVGfo0mOUPEJ4Y2+a/Hv5PiwehZI9qGU=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
+cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
+cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+cuelang.org/go v0.10.0 h1:Y1Pu4wwga5HkXfLFK1sWAYaSWIBdcsr5Cb5AWj2pOuE=
+cuelang.org/go v0.10.0/go.mod h1:HzlaqqqInHNiqE6slTP6+UtxT9hN6DAzgJgdbNxXvX8=
+cuelang.org/go v0.13.2 h1:SagzeEASX4E2FQnRbItsqa33sSelrJjQByLqH9uZCE8=
+cuelang.org/go v0.13.2/go.mod h1:8MoQXu+RcXsa2s9mebJN1HJ1orVDc9aI9/yKi6Dzsi4=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
+github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/CycloneDX/cyclonedx-go v0.9.1 h1:yffaWOZsv77oTJa/SdVZYdgAgFioCeycBUKkqS2qzQM=
+github.com/CycloneDX/cyclonedx-go v0.9.1/go.mod h1:NE/EWvzELOFlG6+ljX/QeMlVt9VKcTwu8u0ccsACEsw=
+github.com/CycloneDX/cyclonedx-go v0.9.2 h1:688QHn2X/5nRezKe2ueIVCt+NRqf7fl3AVQk+vaFcIo=
+github.com/CycloneDX/cyclonedx-go v0.9.2/go.mod h1:vcK6pKgO1WanCdd61qx4bFnSsDJQ6SbM2ZuMIgq86Jg=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
+github.com/KeisukeYamashita/go-vcl v0.4.0 h1:dFxZq2yVeaCWBJAT7Oh9Z+Pp8y32i7b11QHdzsuBcsk=
+github.com/KeisukeYamashita/go-vcl v0.4.0/go.mod h1:af2qGlXbsHDQN5abN7hyGNKtGhcFSaDdbLl4sfud+AU=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
+github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
+github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY=
+github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
+github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
+github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
+github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc=
+github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
+github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
+github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
+github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
+github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
+github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
+github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
+github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.49.6 h1:yNldzF5kzLBRvKlKz1S0bkvc2+04R1kt13KfBWQBfFA=
+github.com/aws/aws-sdk-go v1.49.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/basgys/goxml2json v1.1.0 h1:4ln5i4rseYfXNd86lGEB+Vi652IsIXIvggKM/BhUKVw=
+github.com/basgys/goxml2json v1.1.0/go.mod h1:wH7a5Np/Q4QoECFIU8zTQlZwZkrilY0itPfecMw41Dw=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
+github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY=
+github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk=
+github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg=
+github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc=
+github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
+github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
+github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
+github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
+github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
+github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
+github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-akka/configuration v0.0.0-20200606091224-a002c0330665 h1:Iz3aEheYgn+//VX7VisgCmF/wW3BMtXCLbvHV4jMQJA=
+github.com/go-akka/configuration v0.0.0-20200606091224-a002c0330665/go.mod h1:19bUnum2ZAeftfwwLZ/wRe7idyfoW2MfmXO464Hrfbw=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
+github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
+github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/godoctor/godoctor v0.0.0-20181123222458-69df17f3a6f6/go.mod h1:+tyhT8jBF8E0XvdlSXOSL7Iko7DlNiongHq3q+wcsPs=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-jsonnet v0.20.0 h1:WG4TTSARuV7bSm4PMB4ohjxe33IHT5WVTrJSU33uT4g=
+github.com/google/go-jsonnet v0.20.0/go.mod h1:VbgWF9JX7ztlv770x/TolZNGGFfiHEVx9G6ca2eUmeA=
+github.com/google/go-jsonnet v0.21.0 h1:43Bk3K4zMRP/aAZm9Po2uSEjY6ALCkYUVIcz9HLGMvA=
+github.com/google/go-jsonnet v0.21.0/go.mod h1:tCGAu8cpUpEZcdGMmdOu37nh8bGgqubhI5v2iSk3KJQ=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
+github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
+github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA=
+github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
+github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
+github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-getter v1.7.6 h1:5jHuM+aH373XNtXl9TNTUH5Qd69Trve11tHIrB+6yj4=
+github.com/hashicorp/go-getter v1.7.6/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744=
+github.com/hashicorp/go-getter v1.7.8 h1:mshVHx1Fto0/MydBekWan5zUipGq7jO0novchgMmSiY=
+github.com/hashicorp/go-getter v1.7.8/go.mod h1:2c6CboOEb9jG6YvmC9xdD+tyAFsrUaJPedwXDGr0TM4=
+github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
+github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/hcl/v2 v2.6.0/go.mod h1:bQTN5mpo+jewjJgh8jr0JUguIi7qPHUF6yIfAEN3jqY=
+github.com/hashicorp/hcl/v2 v2.17.0 h1:z1XvSUyXd1HP10U4lrLg5e0JMVz6CPaJvAgxM0KNZVY=
+github.com/hashicorp/hcl/v2 v2.17.0/go.mod h1:gJyW2PTShkJqQBKpAmPO3yxMxIuoXkOF2TpqXzrQyx4=
+github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos=
+github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jstemmer/go-junit-report v1.0.0 h1:8X1gzZpR+nVQLAht+L/foqOeX2l9DTZoaIPbEQHxsds=
+github.com/jstemmer/go-junit-report v1.0.0/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
+github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
+github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
+github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
+github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
+github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/buildkit v0.16.0 h1:wOVBj1o5YNVad/txPQNXUXdelm7Hs/i0PUFjzbK0VKE=
+github.com/moby/buildkit v0.16.0/go.mod h1:Xqx/5GlrqE1yIRORk0NSCVDFpQAU1WjlT6KHYZdisIQ=
+github.com/moby/buildkit v0.23.2 h1:gt/dkfcpgTXKx+B9I310kV767hhVqTvEyxGgI3mqsGQ=
+github.com/moby/buildkit v0.23.2/go.mod h1:iEjAfPQKIuO+8y6OcInInvzqTMiKMbb2RdJz1K/95a0=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/muhammadmuzzammil1998/jsonc v1.0.0 h1:8o5gBQn4ZA3NBA9DlTujCj2a4w0tqWrPVjDwhzkgTIs=
+github.com/muhammadmuzzammil1998/jsonc v1.0.0/go.mod h1:saF2fIVw4banK0H4+/EuqfFLpRnoy5S+ECwTOCcRcSU=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/open-policy-agent/conftest v0.56.0 h1:Q27Y45rdUHAOTjkeTbmHf2kWgW+DeFauZMaDjJm98YA=
+github.com/open-policy-agent/conftest v0.56.0/go.mod h1:u4xu/0jtZnsenKf06J/tdm/7CtP8ODmZ/JsRPTDCXMg=
+github.com/open-policy-agent/conftest v0.62.0 h1:mk6Kbf8WTGjI8byKd59GWjIGsOPr+dmiEwjyDEZMWhk=
+github.com/open-policy-agent/conftest v0.62.0/go.mod h1:oX2ScMAaFCJ2f4bAy23GBibaUzn1b8lRs6gkhu4G+IA=
+github.com/open-policy-agent/opa v0.69.0 h1:s2igLw2Z6IvGWGuXSfugWkVultDMsM9pXiDuMp7ckWw=
+github.com/open-policy-agent/opa v0.69.0/go.mod h1:+qyXJGkpEJ6kpB1kGo8JSwHtVXbTdsGdQYPWWNYNj+4=
+github.com/open-policy-agent/opa v1.6.0 h1:/S/cnNQJ2MUMNzizHPbisTWBHowmLkPrugY5jjkPlRQ=
+github.com/open-policy-agent/opa v1.6.0/go.mod h1:zFmw4P+W62+CWGYRDDswfVYSCnPo6oYaktQnfIaRFC4=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
+github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
+github.com/owenrumney/go-sarif v1.1.1/go.mod h1:dNDiPlF04ESR/6fHlPyq7gHKmrM0sHUvAGjsoh8ZH0U=
+github.com/owenrumney/go-sarif/v2 v2.3.3 h1:ubWDJcF5i3L/EIOER+ZyQ03IfplbSU1BLOE26uKQIIU=
+github.com/owenrumney/go-sarif/v2 v2.3.3/go.mod h1:MSqMMx9WqlBSY7pXoOZWgEsVB4FDNfhcaXDA1j6Sr+w=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
+github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
+github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
+github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
+github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
+github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shteou/go-ignore v0.3.1 h1:/DVY4w06eKliWrbkwKfBHJgUleld+QAlmlQvfRQOigA=
+github.com/shteou/go-ignore v0.3.1/go.mod h1:hMVyBe+qt5/Z11W/Fxxf86b5SuL8kM29xNWLYob9Vos=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM=
+github.com/spdx/tools-golang v0.5.5 h1:61c0KLfAcNqAjlg6UNMdkwpMernhw3zVRwDZ2x9XOmk=
+github.com/spdx/tools-golang v0.5.5/go.mod h1:MVIsXx8ZZzaRWNQpUDhC4Dud34edUYJYecciXgrw5vE=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
+github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
+github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
+github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
+github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
+github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
+github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
+github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
+github.com/tmccombs/hcl2json v0.3.1 h1:Pf+Lb9OpZ5lkQuIC0BB5txdCQskZ2ud/l8sz/Nkjf3A=
+github.com/tmccombs/hcl2json v0.3.1/go.mod h1:ljY0/prd2IFUF3cagQjV3cpPEEQKzqyGqnKI7m5DBVY=
+github.com/tmccombs/hcl2json v0.6.7 h1:RYKTs4kd/gzRsEiv7J3M2WQ7TYRYZVc+0H0pZdERkxA=
+github.com/tmccombs/hcl2json v0.6.7/go.mod h1:lJgBOOGDpbhjvdG2dLaWsqB4KBzul2HytfDTS3H465o=
+github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8=
+github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
+github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 h1:2f304B10LaZdB8kkVEaoXvAMVan2tl9AiK4G0odjQtE=
+github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
+github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/vektah/gqlparser v1.2.0/go.mod h1:bkVf0FX+Stjg/MHnm8mEyubuaArhNEqfQhF+OTiAL74=
+github.com/vektah/gqlparser/v2 v2.5.28 h1:bIulcl3LF69ba6EiZVGD88y4MkM+Jxrf3P2MX8xLRkY=
+github.com/vektah/gqlparser/v2 v2.5.28/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo=
+github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
+github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
+github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
+github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg=
+github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8=
+github.com/zclconf/go-cty v1.6.1/go.mod h1:VDR4+I79ubFBGm1uJac1226K5yANQFHeauxPBoP54+o=
+github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
+github.com/zclconf/go-cty v1.13.2 h1:4GvrUxe/QUDYuJKAav4EYqdM47/kZa672LwmXFmEKT0=
+github.com/zclconf/go-cty v1.13.2/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0=
+github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70=
+github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao=
+go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
+go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
+go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
+go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
+go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
+go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
+go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
+go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
+go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
+go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
+go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
+go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
+go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
+go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
+go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
+go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
+go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net 
v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod 
h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= +golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text 
v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191127201027-ecd32218bd7f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod 
h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api 
v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= +google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/api v0.215.0 h1:jdYF4qnyczlEz2ReWIsosNLDuzXyvFHJtI5gcr0J7t0= +google.golang.org/api v0.215.0/go.mod h1:fta3CVtuJYOEdugLNWm6WodzOS8KdFckABwN4I40hzY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod 
h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod 
h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk= +google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 h1:Kog3KlB4xevJlAcbbbzPfRG0+X9fdoGM+UBRKVz6Wr0= +google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237/go.mod h1:ezi0AVyMKDWy5xAncvjLWH7UcLBB5n7y2fQ8MzjJcto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 h1:cJfm9zPbe1e873mHJzmQ1nwVEeRDU/T1wXDK2kUSU34= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod 
h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= +google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= 
+lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +muzzammil.xyz/jsonc v1.0.0 h1:B6kaT3wHueZ87mPz3q1nFuM1BlL32IG0wcq0/uOsQ18= +muzzammil.xyz/jsonc v1.0.0/go.mod h1:rFv8tUUKe+QLh7v02BhfxXEf4ZHhYD7unR93HL/1Uvo= +olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3 h1:slmdOY3vp8a7KQbHkL+FLbvbkgMqmXojpFUO/jENuqQ= +olympos.io/encoding/edn v0.0.0-20201019073823-d3554ca0b0a3/go.mod h1:oVgVk4OWVDi43qWBEyGhXgYxt7+ED4iYNpTngSLX2Iw= +oras.land/oras-go/v2 v2.4.0 h1:i+Wt5oCaMHu99guBD0yuBjdLvX7Lz8ukPbwXdR7uBMs= +oras.land/oras-go/v2 v2.4.0/go.mod h1:osvtg0/ClRq1KkydMAEu/IxFieyjItcsQ4ut4PPF+f8= +oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= +oras.land/oras-go/v2 v2.6.0/go.mod 
h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/.bingo/controller-gen.mod b/.bingo/controller-gen.mod index 898ad469d..3a1d74ca3 100644 --- a/.bingo/controller-gen.mod +++ b/.bingo/controller-gen.mod @@ -1,5 +1,5 @@ module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT -go 1.24.6 +go 1.25.0 -require sigs.k8s.io/controller-tools v0.19.0 // cmd/controller-gen +require sigs.k8s.io/controller-tools v0.20.0 // cmd/controller-gen diff --git a/.bingo/controller-gen.sum b/.bingo/controller-gen.sum index 864468974..0c3abcb92 100644 --- a/.bingo/controller-gen.sum +++ b/.bingo/controller-gen.sum @@ -52,9 +52,14 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -68,6 +73,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -77,17 +84,23 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= 
+golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -95,22 +108,30 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -122,24 +143,42 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= +k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= +k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= k8s.io/code-generator v0.34.0 h1:Ze2i1QsvUprIlX3oHiGv09BFQRLCz+StA8qKwwFzees= k8s.io/code-generator v0.34.0/go.mod h1:Py2+4w2HXItL8CGhks8uI/wS3Y93wPKO/9mBQUYNua0= +k8s.io/code-generator v0.35.0 h1:TvrtfKYZTm9oDF2z+veFKSCcgZE3Igv0svY+ehCmjHQ= +k8s.io/code-generator v0.35.0/go.mod h1:iS1gvVf3c/T71N5DOGYO+Gt3PdJ6B9LYSvIyQ4FHzgc= k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q= k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= +k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b h1:gMplByicHV/TJBizHd9aVEsTYoJBnnUAT5MHlTkbjhQ= +k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b/go.mod h1:CgujABENc3KuTrcsdpGmrrASjtQsWCT7R99mEV4U/fM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 
h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-tools v0.19.0 h1:OU7jrPPiZusryu6YK0jYSjPqg8Vhf8cAzluP9XGI5uk= sigs.k8s.io/controller-tools v0.19.0/go.mod h1:y5HY/iNDFkmFla2CfQoVb2AQXMsBk4ad84iR1PLANB0= +sigs.k8s.io/controller-tools v0.20.0 h1:VWZF71pwSQ2lZZCt7hFGJsOfDc5dVG28/IysjjMWXL8= +sigs.k8s.io/controller-tools v0.20.0/go.mod h1:b4qPmjGU3iZwqn34alUU5tILhNa9+VXK+J3QV0fT/uU= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= diff --git a/.bingo/golangci-lint.mod b/.bingo/golangci-lint.mod index 4607edf92..5b5575d98 100644 --- a/.bingo/golangci-lint.mod +++ b/.bingo/golangci-lint.mod @@ -2,4 +2,4 @@ module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT go 1.24.6 -require github.com/golangci/golangci-lint/v2 v2.6.2 // cmd/golangci-lint +require github.com/golangci/golangci-lint/v2 v2.7.2 // cmd/golangci-lint diff --git a/.bingo/golangci-lint.sum b/.bingo/golangci-lint.sum index 3146c7150..b7d8399e3 100644 --- a/.bingo/golangci-lint.sum +++ b/.bingo/golangci-lint.sum @@ -65,6 +65,8 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1 github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/MirrexOne/unqueryvet v1.2.1 h1:M+zdXMq84g+E1YOLa7g7ExN3dWfZQrdDSTCM7gC+m/A= github.com/MirrexOne/unqueryvet v1.2.1/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg= +github.com/MirrexOne/unqueryvet v1.3.0 h1:5slWSomgqpYU4zFuZ3NNOfOUxVPlXFDBPAVasZOGlAY= +github.com/MirrexOne/unqueryvet v1.3.0/go.mod h1:IWwCwMQlSWjAIteW0t+28Q5vouyktfujzYznSIWiuOg= github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= github.com/alecthomas/chroma/v2 v2.20.0 h1:sfIHpxPyR07/Oylvmcai3X/exDlE8+FA820NTz+9sGw= @@ -114,6 +116,8 @@ github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= github.com/catenacyber/perfsprint v0.10.0 h1:AZj1mYyxbxLRqmnYOeguZXEQwWOgQGm2wzLI5d7Hl/0= github.com/catenacyber/perfsprint v0.10.0/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc= +github.com/catenacyber/perfsprint v0.10.1 h1:u7Riei30bk46XsG8nknMhKLXG9BcXz3+3tl/WpKm0PQ= +github.com/catenacyber/perfsprint v0.10.1/go.mod h1:DJTGsi/Zufpuus6XPGJyKOTMELe347o6akPvWG9Zcsc= github.com/ccojocar/zxcvbn-go v1.0.4 h1:FWnCIRMXPj43ukfX000kvBZvV6raSxakYr1nzyNrUcc= github.com/ccojocar/zxcvbn-go v1.0.4/go.mod h1:3GxGX+rHmueTUMvm5ium7irpyjmm7ikxYFOSJB21Das= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -209,6 +213,8 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godoc-lint/godoc-lint v0.10.1 h1:ZPUVzlDtJfA+P688JfPJPkI/SuzcBr/753yGIk5bOPA= github.com/godoc-lint/godoc-lint v0.10.1/go.mod h1:KleLcHu/CGSvkjUH2RvZyoK1MBC7pDQg4NxMYLcBBsw= +github.com/godoc-lint/godoc-lint v0.10.2 h1:dksNgK+zebnVlj4Fx83CRnCmPO0qRat/9xfFsir1nfg= +github.com/godoc-lint/godoc-lint v0.10.2/go.mod h1:KleLcHu/CGSvkjUH2RvZyoK1MBC7pDQg4NxMYLcBBsw= github.com/gofrs/flock v0.13.0 h1:95JolYOvGMqeH31+FC7D2+uULf6mG61mEZ/A8dRYMzw= github.com/gofrs/flock v0.13.0/go.mod h1:jxeyy9R1auM5S6JYDBhDt+E2TCo7DkratH4Pgi8P+Z0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -251,6 +257,8 @@ github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0a github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= github.com/golangci/golangci-lint/v2 v2.6.2 h1:jkMSVv36JmyTENcEertckvimvjPcD5qxNM7W7qhECvI= github.com/golangci/golangci-lint/v2 v2.6.2/go.mod h1:fSIMDiBt9kzdpnvvV7GO6iWzyv5uaeZ+iPor+2uRczE= +github.com/golangci/golangci-lint/v2 v2.7.2 h1:AhBC+YeEueec4AGlIbvPym5C70Thx0JykIqXbdIXWx0= +github.com/golangci/golangci-lint/v2 v2.7.2/go.mod h1:pDijleoBu7e8sejMqyZ3L5n6geqe+cVvOAz2QImqqVc= github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95 h1:AkK+w9FZBXlU/xUmBtSJN1+tAI4FIvy5WtnUnY8e4p8= github.com/golangci/golines v0.0.0-20250217134842-442fd0091d95/go.mod h1:k9mmcyWKSTMcPPvQUCfRWWQ9VHJ1U9Dc0R7kaXAgtnQ= github.com/golangci/misspell v0.7.0 h1:4GOHr/T1lTW0hhR4tgaaV1WS/lJ+ncvYCoFKmqJsj0c= @@ -308,6 +316,8 @@ github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1T github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= +github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -394,6 +404,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mgechev/revive v1.12.0 h1:Q+/kkbbwerrVYPv9d9efaPGmAO/NsxwW/nE6ahpQaCU= github.com/mgechev/revive v1.12.0/go.mod h1:VXsY2LsTigk8XU9BpZauVLjVrhICMOV3k1lpB3CXrp8= +github.com/mgechev/revive v1.13.0 h1:yFbEVliCVKRXY8UgwEO7EOYNopvjb1BFbmYqm9hZjBM= +github.com/mgechev/revive v1.13.0/go.mod h1:efJfeBVCX2JUumNQ7dtOLDja+QKj9mYGgEZA7rt5u+0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -488,6 +500,8 @@ 
github.com/sashamelentyev/usestdlibvars v1.29.0 h1:8J0MoRrw4/NAXtjQqTHrbW9NN+3iM github.com/sashamelentyev/usestdlibvars v1.29.0/go.mod h1:8PpnjHMk5VdeWlVb4wCdrB8PNbLqZ3wBZTZWkrpZZL8= github.com/securego/gosec/v2 v2.22.10 h1:ntbBqdWXnu46DUOXn+R2SvPo3PiJCDugTCgTW2g4tQg= github.com/securego/gosec/v2 v2.22.10/go.mod h1:9UNjK3tLpv/w2b0+7r82byV43wCJDNtEDQMeS+H/g2w= +github.com/securego/gosec/v2 v2.22.11-0.20251204091113-daccba6b93d7 h1:rZg6IGn0ySYZwCX8LHwZoYm03JhG/cVAJJ3O+u3Vclo= +github.com/securego/gosec/v2 v2.22.11-0.20251204091113-daccba6b93d7/go.mod h1:9sr22NZO5Kfh7unW/xZxkGYTmj2484/fCiE54gw7UTY= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -503,10 +517,14 @@ github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCp github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -519,6 +537,8 @@ github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YE github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4= github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk= +github.com/stbenjam/no-sprintf-host-port v0.3.1 h1:AyX7+dxI4IdLBPtDbsGAyqiTSLpCP9hWRrXQDU4Cm/g= +github.com/stbenjam/no-sprintf-host-port v0.3.1/go.mod h1:ODbZesTCHMVKthBHskvUUexdcNHAQRXk9NpSsL8p/HQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= @@ -541,6 +561,8 @@ github.com/timonwong/loggercheck v0.11.0 h1:jdaMpYBl+Uq9mWPXv1r8jc5fC3gyXx4/WGwT github.com/timonwong/loggercheck v0.11.0/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= github.com/tomarrell/wrapcheck/v2 v2.11.0 h1:BJSt36snX9+4WTIXeJ7nvHBQBcm1h2SjQMSlmQ6aFSU= github.com/tomarrell/wrapcheck/v2 v2.11.0/go.mod h1:wFL9pDWDAbXhhPZZt+nG8Fu+h29TtnZ2MW6Lx4BRXIU= +github.com/tomarrell/wrapcheck/v2 v2.12.0 h1:H/qQ1aNWz/eeIhxKAFvkfIA+N7YDvq6TWVFL27Of9is= 
+github.com/tomarrell/wrapcheck/v2 v2.12.0/go.mod h1:AQhQuZd0p7b6rfW+vUwHm5OMCGgp63moQ9Qr/0BpIWo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= @@ -589,6 +611,8 @@ go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -642,6 +666,8 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -756,6 +782,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -774,6 +802,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -829,6 +859,8 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58 golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/.bingo/goreleaser.mod b/.bingo/goreleaser.mod index d4e6c3832..3fe6a4405 100644 --- a/.bingo/goreleaser.mod +++ b/.bingo/goreleaser.mod @@ -1,5 +1,5 @@ module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT -go 1.22.5 +go 1.24.6 -require github.com/goreleaser/goreleaser v1.26.2 +require github.com/goreleaser/goreleaser/v2 v2.11.2 diff --git a/.bingo/goreleaser.sum b/.bingo/goreleaser.sum index c5a6760d4..7d1df8e6f 100644 --- a/.bingo/goreleaser.sum +++ b/.bingo/goreleaser.sum @@ -1,199 +1,239 @@ +al.essio.dev/pkg/shellescape v1.6.0 h1:NxFcEqzFSEVCGN2yq7Huv/9hyCEGVa/TncnOOBBeXHA= +al.essio.dev/pkg/shellescape v1.6.0/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= -cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= -cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs= -cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs= -cloud.google.com/go/storage v1.39.1 h1:MvraqHKhogCOTXTlct/9C3K3+Uy2jBmFYb3/Sp6dVtY= -cloud.google.com/go/storage v1.39.1/go.mod h1:xK6xZmxZmo+fyP7+DEF6FhNc24/JAe95OLyOHCXFH1o= -code.gitea.io/sdk/gitea v0.18.0 h1:+zZrwVmujIrgobt6wVBWCqITz6bn1aBjnCUHmpZrerI= -code.gitea.io/sdk/gitea v0.18.0/go.mod h1:IG9xZJoltDNeDSW0qiF2Vqx5orMWa7OhVWrjvrd5NpI= -dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +cloud.google.com/go v0.121.1 h1:S3kTQSydxmu1JfLRLpKtxRPA7rSrYPRPEUmL/PavVUw= +cloud.google.com/go v0.121.1/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw= +cloud.google.com/go/auth v0.16.1 h1:XrXauHMd30LhQYVRHLGvJiYeczweKQXZxsTbV9TiguU= +cloud.google.com/go/auth v0.16.1/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= 
+cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= +cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/kms v1.22.0 h1:dBRIj7+GDeeEvatJeTB19oYZNV0aj6wEqSIT/7gLqtk= +cloud.google.com/go/kms v1.22.0/go.mod h1:U7mf8Sva5jpOb4bxYZdtw/9zsbIjrklYwPcvMk34AL8= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.55.0 h1:NESjdAToN9u1tmhVqhXCaCwYBuvEhZLLv0gBr+2znf0= +cloud.google.com/go/storage v1.55.0/go.mod h1:ztSmTTwzsdXe5syLVS0YsbFxXuvEmEyZj7v7zChEmuY= +code.gitea.io/sdk/gitea v0.21.0 h1:69n6oz6kEVHRo1+APQQyizkhrZrLsTLXey9142pfkD4= +code.gitea.io/sdk/gitea v0.21.0/go.mod h1:tnBjVhuKJCn8ibdyyhvUyxrR1Ca2KHEoTWoukNhXQPA= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +github.com/42wim/httpsig v1.2.2 h1:ofAYoHUNs/MJOLqQ8hIxeyz2QxOz8qdSVvp3PX/oPgA= +github.com/42wim/httpsig v1.2.2/go.mod h1:P/UYo7ytNBFwc+dg35IubuAUIs8zj5zzFIgUCEl55WY= github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 h1:DSDNVxqkoXJiko6x8a90zidoYqnYYa6c1MTzDKzKkTo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1/go.mod h1:zGqV2R4Cr/k8Uye5w+dgQ06WJtEcbQG/8J7BB6hnCr4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0 h1:m/sWOGCREuSBqg2htVQTBY8nOZpyajYztF0vUvSZTuM= github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0/go.mod h1:Pu5Zksi2KrU7LPbZbNINx6fuVrUp/ffvpxdDj+i8LeE= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 
h1:FbH3BbSb4bvGluTesZZ+ttN/MDsnMmQP36OSnDuSXqw= github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE= +github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7 h1:Q9R3utmFg9K1B4OYtAZ7ZUUvIUdzQt7G2MN5Hi/d670= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.7/go.mod h1:bVrAueELJ0CKLBpUHDIvD516TwmHmzqwCpvONWRsw3s= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/date v0.3.1 h1:o9Z8Jyt+VJJTCZ/UORishuHOusBwolhjokt9s5k8I4w= +github.com/Azure/go-autorest/autorest/date v0.3.1/go.mod h1:Dz/RDmXlfiFFS/eW+b/xMUSFs1tboPVy6UjgADToWDM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= 
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
-github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
-github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
-github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/autorest/to v0.4.1 h1:CxNHBqdzTr7rLtdrtb5CMjJcDut+WNGCVv7OmS5+lTc=
+github.com/Azure/go-autorest/autorest/to v0.4.1/go.mod h1:EtaofgU4zmtvn1zT2ARsjRFdq9vXx0YWtmElwL+GZ9M=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/logger v0.2.2 h1:hYqBsEBywrrOSW24kkOCXRcKfKhK76OzLTfF+MYDE2o=
+github.com/Azure/go-autorest/logger v0.2.2/go.mod h1:I5fg9K52o+iuydlWfa9T5K6WFos9XYr9dYTFzpqgibw=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/Azure/go-autorest/tracing v0.6.1 h1:YUMSrC/CeD1ZnnXcNYU4a/fzsO35u2Fsful9L/2nyR0=
+github.com/Azure/go-autorest/tracing v0.6.1/go.mod h1:/3EgjbsjraOqiicERAeu3m7/z0x1TzjQGAwDrJrXGkc=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
-github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
-github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
+github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
+github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
-github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
-github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
-github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
-github.com/anchore/bubbly v0.0.0-20230518153401-87b6af8ccf22 h1:5NFK6VGgqBUOAX2SYyzFYvNdOiYDxzim8jga386FlZY=
-github.com/anchore/bubbly v0.0.0-20230518153401-87b6af8ccf22/go.mod h1:Kv+Mm9CdtnV8iem48iEPIwy7/N4Wmk0hpxYNH5gTwKQ=
-github.com/anchore/go-logger v0.0.0-20230725134548-c21dafa1ec5a h1:nJ2G8zWKASyVClGVgG7sfM5mwoZlZ2zYpIzN2OhjWkw=
-github.com/anchore/go-logger v0.0.0-20230725134548-c21dafa1ec5a/go.mod h1:ubLFmlsv8/DFUQrZwY5syT5/8Er3ugSr4rDFwHsE3hg=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
+github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
+github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
+github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
+github.com/anchore/bubbly v0.0.0-20241107060245-f2a5536f366a h1:smr1CcMkgeMd6G75N+2OVNk/uHbX/WLR0bk+kMWEyr8=
+github.com/anchore/bubbly v0.0.0-20241107060245-f2a5536f366a/go.mod h1:P5IrP8AhuzApVKa5H7k2hHX5pZA1uhyi+Z1VjK1EtA4=
+github.com/anchore/go-logger v0.0.0-20241005132348-65b4486fbb28 h1:TKlTOayTJKpoLPJbeMykEwxCn0enACf06u0RSIdFG5w=
+github.com/anchore/go-logger v0.0.0-20241005132348-65b4486fbb28/go.mod h1:5iJIa34inbIEFRwoWxNBTnjzIcl4G3le1LppPDmpg/4=
github.com/anchore/go-macholibre v0.0.0-20220308212642-53e6d0aaf6fb h1:iDMnx6LIjtjZ46C0akqveX83WFzhpTD3eqOthawb5vU=
github.com/anchore/go-macholibre v0.0.0-20220308212642-53e6d0aaf6fb/go.mod h1:DmTY2Mfcv38hsHbG78xMiTDdxFtkHpgYNVDPsF2TgHk=
-github.com/anchore/quill v0.4.1 h1:mffDnvnER3ZgPjN5hexc3nr/4Y1dtKdDB6td5K8uInk=
-github.com/anchore/quill v0.4.1/go.mod h1:t6hOPYDohN8wn2SRWQdNkJBkhmK8s3gzuHzzgcEvzQU=
+github.com/anchore/quill v0.5.1 h1:+TAJroWuMC0AofI4gD9V9v65zR8EfKZg8u+ZD+dKZS4=
+github.com/anchore/quill v0.5.1/go.mod h1:tAzfFxVluL2P1cT+xEy+RgQX1hpNuliUC5dTYSsnCLQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/atc0005/go-teams-notify/v2 v2.10.0 h1:eQvRIkyESQgBvlUdQ/iPol/lj3QcRyrdEQM3+c/nXhM=
-github.com/atc0005/go-teams-notify/v2 v2.10.0/go.mod h1:SIeE1UfCcVRYMqP5b+r1ZteHyA/2UAjzWF5COnZ8q0w=
-github.com/aws/aws-sdk-go v1.53.0 h1:MMo1x1ggPPxDfHMXJnQudTbGXYlD4UigUAud1DJxPVo=
-github.com/aws/aws-sdk-go v1.53.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
-github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1 h1:gTK2uhtAPtFcdRRJilZPx8uJLL2J85xK11nKtWL0wfU=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.1/go.mod h1:sxpLb+nZk7tIfCWChfd+h4QwHNUR57d8hA1cleTkjJo=
-github.com/aws/aws-sdk-go-v2/config v1.27.13 h1:WbKW8hOzrWoOA/+35S5okqO/2Ap8hkkFUzoW8Hzq24A=
-github.com/aws/aws-sdk-go-v2/config v1.27.13/go.mod h1:XLiyiTMnguytjRER7u5RIkhIqS8Nyz41SwAWb4xEjxs=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.13 h1:XDCJDzk/u5cN7Aple7D/MiAhx1Rjo/0nueJ0La8mRuE=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.13/go.mod h1:FMNcjQrmuBYvOTZDtOLCIu0esmxjF7RuA/89iSXWzQI=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9 h1:vXY/Hq1XdxHBIYgBUmug/AbMyIe1AKulPYS2/VE1X70=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.9/go.mod h1:GyJJTZoHVuENM4TeJEl5Ffs4W9m19u+4wKJcDi/GZ4A=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3 h1:mDnFOE2sVkyphMWtTH+stv0eW3k0OTx94K63xpxHty4=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.3/go.mod h1:V8MuRVcCRt5h1S+Fwu8KbC7l/gBGo3yBAyUbJM2IJOk=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.28.0 h1:rdPrcOZmqT2F+yzmKEImrx5XUs7Hpf4V9Rp6E8mhsxQ=
-github.com/aws/aws-sdk-go-v2/service/ecr v1.28.0/go.mod h1:if7ybzzjOmDB8pat9FE35AHTY6ZxlYSy3YviSmFZv8c=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.5 h1:452e/nFuqPvwPg+1OD2CG/v29R9MH8egJSJKh2Qduv8=
-github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.5/go.mod h1:8pvvNAklmq+hKmqyvFoMRg0bwg9sdGOvdwximmKiKP0=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5 h1:mbWNpfRUTT6bnacmvOTKXZjR/HycibdWzNpfbrbLDIs=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.5/go.mod h1:FCOPWGjsshkkICJIn9hq9xr6dLKtyaWpuUojiN3W1/8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3 h1:4t+QEX7BsXz98W8W1lNvMAG+NX8qHz2CjLBxQKku40g=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.3/go.mod h1:oFcjjUq5Hm09N9rpxTdeMeLeQcxS7mIkBkL8qUKng+A=
-github.com/aws/aws-sdk-go-v2/service/kms v1.30.0 h1:yS0JkEdV6h9JOo8sy2JSpjX+i7vsKifU8SIeHrqiDhU=
-github.com/aws/aws-sdk-go-v2/service/kms v1.30.0/go.mod h1:+I8VUUSVD4p5ISQtzpgSva4I8cJ4SQ4b1dcBcof7O+g=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4 h1:lW5xUzOPGAMY7HPuNF4FdyBwRc3UJ/e8KsapbesVeNU=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.51.4/go.mod h1:MGTaf3x/+z7ZGugCGvepnx2DS6+caCYYqKhzVoLNYPk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.6 h1:o5cTaeunSpfXiLTIBx5xo2enQmiChtu1IBbzXnfU9Hs=
-github.com/aws/aws-sdk-go-v2/service/sso v1.20.6/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0 h1:Qe0r0lVURDDeBQJ4yP+BOrJkvkiCo/3FH/t+wY11dmw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.0/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.7 h1:et3Ta53gotFR4ERLXXHIHl/Uuk1qYpP5uU7cvNql8ns=
-github.com/aws/aws-sdk-go-v2/service/sts v1.28.7/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
-github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
-github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240514230400-03fa26f5508f h1:Z0kS9pJDQgCg3u2lH6+CdYaFbyQtyukVTiUCG6re0E4=
-github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20240514230400-03fa26f5508f/go.mod h1:rAE739ssmE5O5fLuQ2y8uHdmOJaelE5I0Es3SxV0y1A=
+github.com/atc0005/go-teams-notify/v2 v2.13.0 h1:nbDeHy89NjYlF/PEfLVF6lsserY9O5SnN1iOIw3AxXw=
+github.com/atc0005/go-teams-notify/v2 v2.13.0/go.mod h1:WSv9moolRsBcpZbwEf6gZxj7h0uJlJskJq5zkEWKO8Y=
+github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
+github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go-v2 v1.37.1 h1:SMUxeNz3Z6nqGsXv0JuJXc8w5YMtrQMuIBmDx//bBDY=
+github.com/aws/aws-sdk-go-v2 v1.37.1/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg=
+github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0=
+github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 h1:6VFPH/Zi9xYFMJKPQOX5URYkQoXRWeJ7V/7Y6ZDYoms=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69/go.mod h1:GJj8mmO6YT6EqgduWocwhMoxTLFitkhIrK+owzrYL2I=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1 h1:ksZXBYv80EFTcgc8OJO48aQ8XDWXIQL7gGasPeCoTzI=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.1/go.mod h1:HSksQyyJETVZS7uM54cir0IgxttTD+8aEoJMPGepHBI=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1 h1:+dn/xF/05utS7tUhjIcndbuaPjfll2LhbH1cCDGLYUQ=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.1/go.mod h1:hyAGz30LHdm5KBZDI58MXx5lDVZ5CUfvfTZvMu4HCZo=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1 h1:4HbnOGE9491a9zYJ9VpPh1ApgEq6ZlD4Kuv1PJenFpc=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.1/go.mod h1:Z6QnHC6TmpJWUxAy8FI4JzA7rTwl6EIANkyK9OR5z5w=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1 h1:Bwzh202Aq7/MYnAjXA9VawCf6u+hjwMdoYmZ4HYsdf8=
+github.com/aws/aws-sdk-go-v2/service/ecr v1.45.1/go.mod h1:xZzWl9AXYa6zsLLH41HBFW8KRKJRIzlGmvSM0mVMIX4=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2 h1:XJ/AEFYj9VFPJdF+VFi4SUPEDfz1akHwxxm07JfZJcs=
+github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.33.2/go.mod h1:JUBHdhvKbbKmhaHjLsKJAWnQL80T6nURmhB/LEprV+4=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1 h1:ps3nrmBWdWwakZBydGX1CxeYFK80HsQ79JLMwm7Y4/c=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.1/go.mod h1:bAdfrfxENre68Hh2swNaGEVuFYE74o0SaSCAlaG9E74=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1 h1:ky79ysLMxhwk5rxJtS+ILd3Mc8kC5fhsLBrP27r6h4I=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.1/go.mod h1:+2MmkvFvPYM1vsozBWduoLJUi5maxFk5B7KJFECujhY=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1 h1:MdVYlN5pcQu1t1OYx4Ajo3fKl1IEhzgdPQbYFCRjYS8=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.1/go.mod h1:iikmNLrvHm2p4a3/4BPeix2S9P+nW8yM1IZW73x8bFA=
+github.com/aws/aws-sdk-go-v2/service/kms v1.38.1 h1:tecq7+mAav5byF+Mr+iONJnCBf4B4gon8RSp4BrweSc=
+github.com/aws/aws-sdk-go-v2/service/kms v1.38.1/go.mod h1:cQn6tAF77Di6m4huxovNM7NVAozWTZLsDRp9t8Z/WYk=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1 h1:Hsqo8+dFxSdDvv9B2PgIx1AJAnDpqgS0znVI+R+MoGY=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.85.1/go.mod h1:8Q0TAPXD68Z8YqlcIGHs/UNIDHsxErV9H4dl4vJEpgw=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E=
+github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w=
+github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
+github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1 h1:6lMw4/QGLFPvbKQ0eri/9Oh3YX5Nm6BPrUlZR8yuJHg=
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.10.1/go.mod h1:EVJOSYOVeoD3VFFZ/dWCAzWJp5wZr9lTOCjW8ejAmO0=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/blacktop/go-dwarf v1.0.9 h1:eT/L7gt0gllvvgnRXY0MFKjNB6+jtOY5DTm2ynVX2dY=
-github.com/blacktop/go-dwarf v1.0.9/go.mod h1:4W2FKgSFYcZLDwnR7k+apv5i3nrau4NGl9N6VQ9DSTo=
-github.com/blacktop/go-macho v1.1.162 h1:FjM3XAsJTAOGZ1eppRSX9ZBX3Bk11JMTC1amsZAOA5I=
-github.com/blacktop/go-macho v1.1.162/go.mod h1:f2X4noFBob4G5bWUrzvPBKDVcFWZgDCM7rIn7ygTID0=
+github.com/blacktop/go-dwarf v1.0.10 h1:i9zYgcIROETsNZ6V+zZn3uDH21FCG5BLLZ837GitxS0=
+github.com/blacktop/go-dwarf v1.0.10/go.mod h1:4W2FKgSFYcZLDwnR7k+apv5i3nrau4NGl9N6VQ9DSTo=
+github.com/blacktop/go-macho v1.1.238 h1:OFfT6NB/SWxkoky7L/ytuY8QekgFpa9pmz/GHUQLsmM=
+github.com/blacktop/go-macho v1.1.238/go.mod h1:dtlW2AJKQpFzImBVPWiUKZ6OxrQ2MLfWi/BPPe0EONE=
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4=
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
-github.com/bluesky-social/indigo v0.0.0-20240411170459-440932307e0d h1:xxPhzCOpmOntzVe8S6tqsMdFgaB8B4NXSV54lG4B1qk=
-github.com/bluesky-social/indigo v0.0.0-20240411170459-440932307e0d/go.mod h1:ysMQ0a4RYWjgyvKrl5ME352oHA6QgK900g5sB9XXgPE=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bluesky-social/indigo v0.0.0-20240813042137-4006c0eca043 h1:927VIkxPFKpfJKVDtCNgSQtlhksARaLvsLxppR2FukM=
+github.com/bluesky-social/indigo v0.0.0-20240813042137-4006c0eca043/go.mod h1:dXjdzg6bhg1JKnKuf6EBJTtcxtfHYBFEe9btxX5YeAE=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
-github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
-github.com/caarlos0/ctrlc v1.2.0 h1:AtbThhmbeYx1WW3WXdWrd94EHKi+0NPRGS4/4pzrjwk=
-github.com/caarlos0/ctrlc v1.2.0/go.mod h1:n3gDlSjsXZ7rbD9/RprIR040b7oaLfNStikPd4gFago=
-github.com/caarlos0/env/v11 v11.0.1 h1:A8dDt9Ub9ybqRSUF3fQc/TA/gTam2bKT4Pit+cwrsPs=
-github.com/caarlos0/env/v11 v11.0.1/go.mod h1:2RC3HQu8BQqtEK3V4iHPxj0jOdWdbPpWJ6pOueeU1xM=
+github.com/caarlos0/env/v11 v11.3.1 h1:cArPWC15hWmEt+gWk7YBi7lEXTXCvpaSdCiZE2X5mCA=
+github.com/caarlos0/env/v11 v11.3.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U=
github.com/caarlos0/go-reddit/v3 v3.0.1 h1:w8ugvsrHhaE/m4ez0BO/sTBOBWI9WZTjG7VTecHnql4=
github.com/caarlos0/go-reddit/v3 v3.0.1/go.mod h1:QlwgmG5SAqxMeQvg/A2dD1x9cIZCO56BMnMdjXLoisI=
github.com/caarlos0/go-shellwords v1.0.12 h1:HWrUnu6lGbWfrDcFiHcZiwOLzHWjjrPVehULaTFgPp8=
github.com/caarlos0/go-shellwords v1.0.12/go.mod h1:bYeeX1GrTLPl5cAMYEzdm272qdsQAZiaHgeF0KTk1Gw=
-github.com/caarlos0/go-version v0.1.1 h1:1bikKHkGGVIIxqCmufhSSs3hpBScgHGacrvsi8FuIfc=
-github.com/caarlos0/go-version v0.1.1/go.mod h1:Ze5Qx4TsBBi5FyrSKVg1Ibc44KGV/llAaKGp86oTwZ0=
-github.com/caarlos0/log v0.4.4 h1:LnvgBz/ofsJ00AupP/cEfksJSZglb1L69g4Obk/sdAc=
-github.com/caarlos0/log v0.4.4/go.mod h1:+AmCI9Liv5LKXmzFmFI1htuHdTTj/0R3KuoP9DMY7Mo=
+github.com/caarlos0/go-version v0.2.1 h1:bJY5WRvs2RXErLKBELd1WR0U72whX8ELbKg0WtQ9/7A=
+github.com/caarlos0/go-version v0.2.1/go.mod h1:X+rI5VAtJDpcjCjeEIXpxGa5+rTcgur1FK66wS0/944=
+github.com/caarlos0/log v0.5.1 h1:uB1jhC/+HimtyyL7pxidkUWO4raKmidVuXifC4uqMf8=
+github.com/caarlos0/log v0.5.1/go.mod h1:37k7VCogxsMsgpIQaca5g9eXFFrLJ5LGgA4Ng/xN85o=
github.com/caarlos0/testfs v0.4.4/go.mod h1:bRN55zgG4XCUVVHZCeU+/Tz1Q6AxEJOEJTliBy+1DMk=
github.com/carlmjohnson/versioninfo v0.22.5 h1:O00sjOLUAFxYQjlN/bzYTuZiS0y6fWDQjMRvwtKgwwc=
github.com/carlmjohnson/versioninfo v0.22.5/go.mod h1:QT9mph3wcVfISUKd0i9sZfVrPviHuSF+cUtLjm2WSf8=
github.com/cavaliergopher/cpio v1.0.1 h1:KQFSeKmZhv0cr+kawA3a0xTQCU4QxXF1vhU7P7av2KM=
github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc=
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20180118203423-deb3ae2ef261/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
-github.com/charmbracelet/bubbletea v0.22.1 h1:z66q0LWdJNOWEH9zadiAIXp2GN1AWrwNXU8obVY9X24=
-github.com/charmbracelet/bubbletea v0.22.1/go.mod h1:8/7hVvbPN6ZZPkczLiB8YpLkLJ0n7DMho5Wvfd2X1C0=
-github.com/charmbracelet/lipgloss v0.10.0 h1:KWeXFSexGcfahHX+54URiZGkBFazf70JNMtwg/AFW3s=
-github.com/charmbracelet/lipgloss v0.10.0/go.mod h1:Wig9DSfvANsxqkRsqj6x87irdy123SR4dOXlKa91ciE=
-github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d h1:+o+e/8hf7cG0SbAzEAm/usJ8qoZPgFXhudLjop+TM0g=
-github.com/charmbracelet/x/exp/ordered v0.0.0-20231010190216-1cb11efc897d/go.mod h1:aoG4bThKYIOnyB55r202eHqo6TkN7ZXV+cu4Do3eoBQ=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charmbracelet/bubbletea v1.3.0 h1:fPMyirm0u3Fou+flch7hlJN9krlnVURrkUVDwqXjoAc=
+github.com/charmbracelet/bubbletea v1.3.0/go.mod h1:eTaHfqbIwvBhFQM/nlT1NsGc4kp8jhF8LfUK67XiTDM=
+github.com/charmbracelet/colorprofile v0.3.1 h1:k8dTHMd7fgw4bnFd7jXTLZrSU/CQrKnL3m+AxCzDz40=
+github.com/charmbracelet/colorprofile v0.3.1/go.mod h1:/GkGusxNs8VB/RSOh3fu0TJmQ4ICMMPApIIVn0KszZ0=
+github.com/charmbracelet/fang v0.3.0 h1:Be6TB+ExS8VWizTQRJgjqbJBudKrmVUet65xmFPGhaA=
+github.com/charmbracelet/fang v0.3.0/go.mod h1:b0ZfEXZeBds0I27/wnTfnv2UVigFDXHhrFNwQztfA0M=
+github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
+github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
+github.com/charmbracelet/lipgloss/v2 v2.0.0-beta1 h1:SOylT6+BQzPHEjn15TIzawBPVD0QmhKXbcb3jY0ZIKU=
+github.com/charmbracelet/lipgloss/v2 v2.0.0-beta1/go.mod h1:tRlx/Hu0lo/j9viunCN2H+Ze6JrmdjQlXUQvvArgaOc=
+github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE=
+github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q=
+github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
+github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
+github.com/charmbracelet/x/exp/charmtone v0.0.0-20250603201427-c31516f43444 h1:IJDiTgVE56gkAGfq0lBEloWgkXMk4hl/bmuPoicI4R0=
+github.com/charmbracelet/x/exp/charmtone v0.0.0-20250603201427-c31516f43444/go.mod h1:T9jr8CzFpjhFVHjNjKwbAD7KwBNyFnj2pntAO7F2zw0=
+github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
+github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4=
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
-github.com/cloudflare/circl v1.3.8 h1:j+V8jJt09PoeMFIu2uh5JUyEaIHTXVOHslFoLNAKqwI=
-github.com/cloudflare/circl v1.3.8/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU=
+github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
+github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
-github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
-github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
-github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
-github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
+github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k=
+github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
+github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
-github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
-github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
+github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q=
+github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
+github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
+github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0=
@@ -204,62 +244,75 @@ github.com/dghubble/oauth1 v0.7.3 h1:EkEM/zMDMp3zOsX2DC/ZQ2vnEX3ELK0/l9kb+vs4ptE
github.com/dghubble/oauth1 v0.7.3/go.mod h1:oxTe+az9NSMIucDPDCCtzJGsPhciJV33xocHfcR2sVY=
github.com/dghubble/sling v1.4.0 h1:/n8MRosVTthvMbwlNZgLx579OGVjUOy3GNEv5BIqAWY=
github.com/dghubble/sling v1.4.0/go.mod h1:0r40aNsU9EdDUVBNhfCstAtFgutjgJGYbO1oNzkMoM8=
+github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
+github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I=
+github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v25.0.4+incompatible h1:DatRkJ+nrFoYL2HZUzjM5Z5sAmcA5XGp+AW0oEw2+cA=
-github.com/docker/cli v25.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/cli v28.2.2+incompatible h1:qzx5BNUDFqlvyq4AHzdNB7gSyVTmU4cgsyN9SdInc1A=
+github.com/docker/cli v28.2.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
-github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
-github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
+github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
+github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/elliotchance/orderedmap/v2 v2.2.0 h1:7/2iwO98kYT4XkOjA9mBEIwvi4KpGB4cyHeOFOnj4Vk=
-github.com/elliotchance/orderedmap/v2 v2.2.0/go.mod h1:85lZyVbpGaGvHvnKa7Qhx7zncAdBIBq6u56Hb1PRU5Q=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
-github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
+github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
+github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
+github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
-github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
+github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/github/smimesign v0.2.0 h1:Hho4YcX5N1I9XNqhq0fNx0Sts8MhLonHd+HRXVGNjvk=
github.com/github/smimesign v0.2.0/go.mod h1:iZiiwNT4HbtGRVqCQu7uJPEZCuEE5sfSSttcnePkDl4=
+github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
+github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI=
github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
-github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
-github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
-github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys=
-github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY=
+github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
+github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
+github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60=
+github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k=
+github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY=
+github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
-github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
-github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU=
+github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
@@ -270,15 +323,16 @@ github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9Z
github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
github.com/go-restruct/restruct v1.2.0-alpha h1:2Lp474S/9660+SJjpVxoKuWX09JsXHSrdV7Nv3/gkvc=
github.com/go-restruct/restruct v1.2.0-alpha/go.mod h1:KqrpKpn4M8OLznErihXTGLlsXFGeLxHUrLRRI/1YjGk=
github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 h1:wG8n/XJQ07TmjbITcGiUaOtXxdrINDz1b0J1w0SzqDc=
github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1/go.mod h1:A2S0CWkNylc2phvKXWBBdD3K0iGnDBGbzRpISP2zBl8=
-github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
+github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
@@ -286,14 +340,15 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -304,8 +359,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/certificate-transparency-go v1.3.1 h1:akbcTfQg0iZlANZLn0L9xOeWtyCIdeoYhKrqi5iH3Go=
+github.com/google/certificate-transparency-go v1.3.1/go.mod h1:gg+UQlx6caKEDQ9EElFOujyxEQEfOiQzAt6782Bvi8k=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -313,73 +368,69 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY=
-github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
-github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4=
-github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4=
+github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU=
+github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y=
+github.com/google/go-github/v74 v74.0.0 h1:yZcddTUn8DPbj11GxnMrNiAnXH14gNs559AsUpNpPgM=
+github.com/google/go-github/v74 v74.0.0/go.mod h1:ubn/YdyftV80VPSI26nSJvaEsTOnsjrxG3o9kJhcyak=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
-github.com/google/ko v0.15.4 h1:0blRbIdPmSy6v4LvedGxbI/8krdJYQgbSih3v6Y8V1c=
-github.com/google/ko v0.15.4/go.mod h1:ZkcmfV91Xt6ZzOBHc/cXXGYnqWdNWDVy/gHoUU9sjag=
+github.com/google/ko v0.18.0 h1:jkF5Fkvm+SMtqTt/SMzsCJO+6hz7FSDE6GRldGn0VVI=
+github.com/google/ko v0.18.0/go.mod h1:iR0zT5aR4pINW9tk2Ujj99dBJ7cVy4to9ZirAkGKb9g=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/rpmpack v0.6.1-0.20240329070804-c2247cbb881a h1:JJBdjSfqSy3mnDT0940ASQFghwcZ4y4cb6ttjAoXqwE=
-github.com/google/rpmpack v0.6.1-0.20240329070804-c2247cbb881a/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
+github.com/google/rpmpack v0.7.0 h1:mA2Yd3/dOmao1ype0DJA8DFquEpslaleywOuglVCrUs=
+github.com/google/rpmpack v0.7.0/go.mod h1:uqVAUVQLq8UY2hCDfmJ/+rtO3aw7qyhc90rCVEabEfI=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
+github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962 h1:+9C/TgFfcCmZBV7Fjb3kQCGlkpFrhtvFDgbdQHB9RaA=
+github.com/google/safetext v0.0.0-20240722112252-5a72de7e7962/go.mod h1:H3K1Iu/utuCfa10JO+GsmKUYSWi7ug57Rk6GaDRHaaQ=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
-github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
-github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
+github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
+github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
github.com/gookit/color v1.2.5/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/goreleaser/chglog v0.6.1 h1:NZKiX8l0FTQPRzBgKST7knvNZmZ04f7PEGkN2wInfhE=
-github.com/goreleaser/chglog v0.6.1/go.mod h1:Bnnfo07jMZkaAb0uRNASMZyOsX6ROW6X1qbXqN3guUo=
+github.com/goreleaser/chglog v0.7.0 h1:/KzXWAeg4DrEz4r3OI6K2Yb8RAsVGeInCUfLWFXL9C0=
+github.com/goreleaser/chglog v0.7.0/go.mod h1:2h/yyq9xvTUeM9tOoucBP+jri8Dj28splx+SjlYkklc=
github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+k+7I=
github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU=
-github.com/goreleaser/goreleaser v1.26.2 h1:1iY1HaXtRiMTrwy6KE1sNjkRjsjMi+9l0k6WUX8GpWw=
-github.com/goreleaser/goreleaser v1.26.2/go.mod h1:mHi6zr6fuuOh5eHdWWgyo/N8BWED5WEVtb/4GETc9jQ=
-github.com/goreleaser/nfpm/v2 v2.37.1 h1:RUmeEt8OlEVeSzKRrO5Vl5qVWCtUwx4j9uivGuRo5fw=
-github.com/goreleaser/nfpm/v2 v2.37.1/go.mod h1:q8+sZXFqn106/eGw+9V+I8+izFxZ/sJjrhwmEUxXhUg=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
-github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/goreleaser/goreleaser/v2 v2.11.2 h1:Od6dcPI5r8IWVPnJYz6wYe3rML1qf80fLzXB1Ix6ZnY=
+github.com/goreleaser/goreleaser/v2 v2.11.2/go.mod h1:NSsia+m49thkd/pX9Rz7Cq1KE8HDGrLJVoPLjFeAV/4=
+github.com/goreleaser/nfpm/v2 v2.43.0 h1:o5oureuZkhu55RK0M9WSN8JLW7hu6MymtMh7LypInlk=
+github.com/goreleaser/nfpm/v2 v2.43.0/go.mod h1:f//PE8PjNHjaPCbd7Jkok+aPKdLTrzM+fuIWg3PfVRg=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
-github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=
+github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM=
-github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
-github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
-github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
-github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
+github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/in-toto/attestation v1.1.1 h1:QD3d+oATQ0dFsWoNh5oT0udQ3tUrOsZZ0Fc3tSgWbzI=
+github.com/in-toto/attestation v1.1.1/go.mod h1:Dcq1zVwA2V7Qin8I7rgOi+i837wEf/mOZwRm047Sjys=
+github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
+github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI=
-github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
+github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E=
+github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=
github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0=
github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs=
@@ -410,9 +461,10 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOl
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7 h1:FWpSWRD8FbVkKQu8M1DM9jF5oXFLyE+XpisIYfdzbic=
+github.com/jedisct1/go-minisign v0.0.0-20241212093149-d2f9f49435c7/go.mod h1:BMxO138bOokdgt4UaxZiEfypcSHX0t6SIFimVP1oRfk=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 h1:liMMTbpW34dhU4az1GN0pTPADwNmvoRSeoZ6PItiqnY=
+github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@@ -421,8 +473,8 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
-github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
@@ -432,46 +484,40 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 h1:WGrKdjHtWC67RX96eTkYD2f53NDHhrq/7robWTAfk4s=
-github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491/go.mod h1:o158RFmdEbYyIZmXAbrvmJWesbyxlLKee6X64VPVuOc=
+github.com/letsencrypt/boulder v0.0.0-20250411005613-d800055fe666 h1:ndfLOJNaxu0fX358UKxtq2bU8IMASWi87Hn0Nv/TIoY=
+github.com/letsencrypt/boulder v0.0.0-20250411005613-d800055fe666/go.mod h1:WGXwLq/jKt0kng727wv6a0h0q7TVC+MwS2S75rcqL+4=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
-github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
+github.com/mark3labs/mcp-go v0.36.0 h1:rIZaijrRYPeSbJG8/qNDe0hWlGrCJ7FWHNMz2SQpTis=
+github.com/mark3labs/mcp-go v0.36.0/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g=
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
-github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
-github.com/mattn/go-mastodon v0.0.8 h1:UgKs4SmQ5JeawxMIPP7NQ9xncmOXA+5q6jYk4erR7vk=
-github.com/mattn/go-mastodon v0.0.8/go.mod h1:8YkqetHoAVEktRkK15qeiv/aaIMfJ/Gc89etisPZtHU=
-github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
-github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
-github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75 h1:P8UmIzZMYDR+NGImiFvErt6VWfIRPuGM+vyjiEdkmIw=
+github.com/mattn/go-localereader v0.0.2-0.20220822084749-2491eb6c1c75/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
+github.com/mattn/go-mastodon v0.0.10 h1:wz1d/aCkJOIkz46iv4eAqXHVreUMxydY1xBWrPBdDeE=
+github.com/mattn/go-mastodon v0.0.10/go.mod h1:YBofeqh7G6s787787NQR8erBYz6fKDu+KNMrn5RuD6Y=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho= -github.com/muesli/ansi v0.0.0-20211031195517-c9f0611b6c70 h1:kMlmsLSbjkikxQJ1IPwaM+7LJ9ltFu/fi8CRzvSnQmA= -github.com/muesli/ansi v0.0.0-20211031195517-c9f0611b6c70/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= github.com/muesli/mango v0.1.0 h1:DZQK45d2gGbql1arsYA4vfg4d7I9Hfx5rX/GCmzsAvI= @@ -480,13 +526,10 @@ github.com/muesli/mango-cobra v1.2.0 h1:DQvjzAM0PMZr85Iv9LIMaYISpTOliMEg+uMFtNbY github.com/muesli/mango-cobra v1.2.0/go.mod h1:vMJL54QytZAJhCT13LPVDfkvCUJ5/4jNUKF/8NC2UjA= github.com/muesli/mango-pflag v0.1.0 h1:UADqbYgpUyRoBja3g6LUL+3LErjpsOwaC9ywvBWe7Sg= github.com/muesli/mango-pflag v0.1.0/go.mod h1:YEQomTxaCUp8PrbhFh10UfbhbQrM/xJ4i2PB8VTLLW0= -github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= -github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= github.com/muesli/roff v0.1.0 h1:YD0lalCotmYuF5HhZliKWlIx7IEhiXeSfq7hNjFqGF8= github.com/muesli/roff v0.1.0/go.mod h1:pjAHQM9hdUUwm/krAfrLGgJkXJ+YuhtsfZ42kieB2Ig= -github.com/muesli/termenv v0.11.1-0.20220212125758-44cd13922739/go.mod h1:Bd5NYQ7pd+SrtBSrSNoBBmXlcY8+Xj4BMJgh8qcZrvs= -github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= -github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/multiformats/go-base32 
v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= @@ -501,27 +544,30 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pborman/getopt v0.0.0-20180811024354-2b5b3bfb099b/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= +github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= +github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f h1:VXTQfuJj9vKR4TCkEuWIckKvdHFeJH/huIFJ9/cXOB0= github.com/polydawn/refmt v0.89.1-0.20221221234430-40501e09de1f/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rivo/uniseg v0.1.0/go.mod 
h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -529,113 +575,143 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k= +github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk= +github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= +github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e h1:7q6NSFZDeGfvvtIRwBrU/aegEYJYmvev0cHAwo17zZQ= github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e/go.mod h1:DkpGd78rljTxKAnTDPFqXSGxvETQnJyuSOQwsHycqfs= -github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= -github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc= +github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sigstore/cosign/v2 v2.2.4 h1:iY4vtEacmu2hkNj1Fh+8EBqBwKs2DHM27/lbNWDFJro= -github.com/sigstore/cosign/v2 v2.2.4/go.mod h1:JZlRD2uaEjVAvZ1XJ3QkkZJhTqSDVtLaet+C/TMR81Y= -github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= -github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= -github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4= -github.com/sigstore/sigstore 
v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs= +github.com/sigstore/cosign/v2 v2.5.0 h1:1aRfPgRQHHlODI3Mvs/JkPBS9dJT9bRLCuHZgnHxFt8= +github.com/sigstore/cosign/v2 v2.5.0/go.mod h1:2V2hmo+jjFNnDb5Q5VL6PXvLU9Vujio7T5yldrpNTRw= +github.com/sigstore/protobuf-specs v0.4.1 h1:5SsMqZbdkcO/DNHudaxuCUEjj6x29tS2Xby1BxGU7Zc= +github.com/sigstore/protobuf-specs v0.4.1/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= +github.com/sigstore/rekor v1.3.9 h1:sUjRpKVh/hhgqGMs0t+TubgYsksArZ6poLEC3MsGAzU= +github.com/sigstore/rekor v1.3.9/go.mod h1:xThNUhm6eNEmkJ/SiU/FVU7pLY2f380fSDZFsdDWlcM= +github.com/sigstore/sigstore v1.9.3 h1:y2qlTj+vh+Or3ictKuR3JUFawZPdDxAjrWkeFhon0OQ= +github.com/sigstore/sigstore v1.9.3/go.mod h1:VwYkiw0G0dRtwL25KSs04hCyVFF6CYMd/qvNeYrl7EQ= +github.com/sigstore/sigstore-go v0.7.1 h1:lyzi3AjO6+BHc5zCf9fniycqPYOt3RaC08M/FRmQhVY= +github.com/sigstore/sigstore-go v0.7.1/go.mod h1:AIRj4I3LC82qd07VFm3T2zXYiddxeBV1k/eoS8nTz0E= +github.com/sigstore/timestamp-authority v1.2.5 h1:W22JmwRv1Salr/NFFuP7iJuhytcZszQjldoB8GiEdnw= +github.com/sigstore/timestamp-authority v1.2.5/go.mod h1:gWPKWq4HMWgPCETre0AakgBzcr9DRqHrsgbrRqsigOs= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= -github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/slack-go/slack v0.13.0 h1:7my/pR2ubZJ9912p9FtvALYpbt0cQPAqkRy2jaSI1PQ= -github.com/slack-go/slack v0.13.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= +github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= +github.com/slack-go/slack v0.17.3 h1:zV5qO3Q+WJAQ/XwbGfNFrRMaJ5T/naqaonyPV/1TP4g= +github.com/slack-go/slack v0.17.3/go.mod h1:X+UqOufi3LYQHDnMG1vxf0J8asC6+WllXrVrhl8/Prk= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod 
h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= +github.com/theupdateframework/go-tuf/v2 v2.0.2 h1:PyNnjV9BJNzN1ZE6BcWK+5JbF+if370jjzO84SS+Ebo= +github.com/theupdateframework/go-tuf/v2 v2.0.2/go.mod h1:baB22nBHeHBCeuGZcIlctNq4P61PcOdyARlplg5xmLA= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y= github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFyPdL66DjUD96XmzVL3ZntbzcflLnznH0fr99w5VqE= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= 
+github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= -github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651 h1:jIVmlAFIqV3d+DOxazTR9v+zgj8+VYuQBzPgBZvWBHA= github.com/wagoodman/go-partybus v0.0.0-20230516145632-8ccac152c651/go.mod h1:b26F2tHLqaoRQf8DywqzVaV1MQ9yvjb0OMcNl7Nxu20= github.com/wagoodman/go-progress v0.0.0-20220614130704-4b1c25a33c7c h1:gFwUKtkv6QzQsFdIjvPqd0Qdw42DHUEbbUdiUTI1uco= github.com/wagoodman/go-progress v0.0.0-20220614130704-4b1c25a33c7c/go.mod h1:jLXFoL31zFaHKAAyZUh+sxiTDFe1L1ZHrcK2T1itVKA= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/cbor-gen v0.1.1-0.20240311221002-68b9f235c302 h1:MhInbXe4SzcImAKktUvWBCWZgcw6MYf5NfumTj1BhAw= -github.com/whyrusleeping/cbor-gen v0.1.1-0.20240311221002-68b9f235c302/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= +github.com/whyrusleeping/cbor-gen v0.1.3-0.20240731173018-74d74643234c h1:Jmc9fHbd0LKFmS5CkLgczNUyW36UbiyvbHCG9xCTyiw= +github.com/whyrusleeping/cbor-gen v0.1.3-0.20240731173018-74d74643234c/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc= github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw= -github.com/xanzy/go-gitlab v0.105.0 h1:3nyLq0ESez0crcaM19o5S//SvezOQguuIHZ3wgX64hM= -github.com/xanzy/go-gitlab v0.105.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4= +github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= gitlab.com/digitalxero/go-conventional-commit v1.0.7 h1:8/dO6WWG+98PMhlZowt/YjuiKhqhGlOCwlIV8SqqGh8= gitlab.com/digitalxero/go-conventional-commit v1.0.7/go.mod h1:05Xc2BFsSyC5tKhK0y+P3bs0AwUtNuTp+mTpbCU/DZ0= -go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= 
-go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= +gitlab.com/gitlab-org/api/client-go v0.137.0 h1:H26yL44qnb38Czl20pEINCJrcj63W6/BX8iKPVUKQP0= +gitlab.com/gitlab-org/api/client-go v0.137.0/go.mod h1:AcAYES3lfkIS4zhso04S/wyUaWQmDYve2Fd9AF7C6qc= +go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ= +go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw= +go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis= +go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.uber.org/atomic v1.6.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -646,8 +722,8 @@ go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro= -gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco= +gocloud.dev v0.42.0 h1:qzG+9ItUL3RPB62/Amugws28n+4vGZXEoJEAMfjutzw= +gocloud.dev v0.42.0/go.mod h1:zkaYAapZfQisXOA4bzhsbA4ckiStGQ3Psvs9/OQ5dPM= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -655,20 +731,16 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= 
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= -golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= +golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -681,8 +753,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= +golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -696,17 +768,15 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -717,8 +787,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -731,45 +801,40 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 
-golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -787,34 +852,34 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= -google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +google.golang.org/api v0.235.0 h1:C3MkpQSRxS1Jy6AkzTGKKrpSCOd2WOGrezZ+icKSkKo= +google.golang.org/api v0.235.0/go.mod h1:QpeJkemzkFKe5VCE/PMv7GsUfn9ZF+u+q1Q7w6ckxTg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7 h1:ImUcDPHjTrAqNhlOkSocDLfG9rrNHH7w7uoKWPaWZ8s= -google.golang.org/genproto v0.0.0-20240311173647-c811ad7063a7/go.mod h1:/3XmxOjePkvmKrHuBy4zNFw7IzxJXtAgdpXi8Ll990U= -google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7 h1:oqta3O3AnlWbmIE3bFnWbu4bRxZjfbWCp0cKSuZh01E= -google.golang.org/genproto/googleapis/api v0.0.0-20240311173647-c811ad7063a7/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= +google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= +google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod 
h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -824,18 +889,14 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs= -gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk= gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= @@ -843,7 +904,6 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -852,11 +912,13 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= -sigs.k8s.io/kind v0.23.0 h1:8fyDGWbWTeCcCTwA04v4Nfr45KKxbSPH1WO9K+jVrBg= -sigs.k8s.io/kind v0.23.0/go.mod h1:ZQ1iZuJLh3T+O8fzhdi3VWcFTzsdXtNv2ppsHc8JQ7s= +sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA= +sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= -software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= +software.sslmate.com/src/go-pkcs12 v0.5.0 h1:EC6R394xgENTpZ4RltKydeDUjtlM5drOYIG9c6TVj2M= +software.sslmate.com/src/go-pkcs12 v0.5.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/.bingo/kind.mod b/.bingo/kind.mod index eece79efb..589ee9e90 100644 --- a/.bingo/kind.mod +++ b/.bingo/kind.mod @@ -2,4 +2,4 @@ module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT go 1.24.6 -require sigs.k8s.io/kind v0.30.0 +require sigs.k8s.io/kind v0.31.0 diff --git a/.bingo/kind.sum b/.bingo/kind.sum index 699cf969b..fadffa939 100644 --- a/.bingo/kind.sum +++ b/.bingo/kind.sum @@ -29,5 +29,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY= sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= +sigs.k8s.io/kind v0.31.0 h1:UcT4nzm+YM7YEbqiAKECk+b6dsvc/HRZZu9U0FolL1g= +sigs.k8s.io/kind v0.31.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/.bingo/kube-score.mod b/.bingo/kube-score.mod new file mode 100644 index 000000000..873a8ecb7 --- /dev/null +++ b/.bingo/kube-score.mod @@ -0,0 +1,5 @@ +module _ // Auto generated by https://github.com/bwplotka/bingo. 
DO NOT EDIT + +go 1.24.6 + +require github.com/zegl/kube-score v1.20.0 // cmd/kube-score diff --git a/.bingo/kube-score.sum b/.bingo/kube-score.sum new file mode 100644 index 000000000..9a4cabc8e --- /dev/null +++ b/.bingo/kube-score.sum @@ -0,0 +1,98 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eidolon/wordwrap v0.0.0-20161011182207-e0f54129b8bb h1:ioQwBmKdOCpMVS/bDaESqNWXIE/aw4+gsVtysCGMWZ4= +github.com/eidolon/wordwrap v0.0.0-20161011182207-e0f54129b8bb/go.mod h1:ZAPs+OyRzeVJFGvXVDVffgCzQfjg3qU9Ig8G/MU3zZ4= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report/v2 v2.1.0 h1:X3+hPYlSczH9IMIpSC9CQSZA0L+BipYafciZUWHEmsc= +github.com/jstemmer/go-junit-report/v2 v2.1.0/go.mod h1:mgHVr7VUo5Tn8OLVr1cKnLuEy0M92wdRntM99h7RkgQ= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zegl/kube-score v1.20.0 h1:J1VqK86SunV4Gg8emPTmwUVxe0rmXnAs5K9ZUbGMKR8= +github.com/zegl/kube-score v1.20.0/go.mod h1:mBOw3S3g7TBG/GziT8xNG15dCFn54/jUeEHndxLinE8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= +golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/.bingo/variables.env b/.bingo/variables.env index fc3a980e0..9a7fb41fa 100644 --- a/.bingo/variables.env +++ b/.bingo/variables.env @@ -10,7 +10,9 @@ fi BINGO="${GOBIN}/bingo-v0.9.0" -CONTROLLER_GEN="${GOBIN}/controller-gen-v0.19.0" +CONFTEST="${GOBIN}/conftest-v0.62.0" + +CONTROLLER_GEN="${GOBIN}/controller-gen-v0.20.0" CRD_DIFF="${GOBIN}/crd-diff-v0.5.0" @@ -18,13 +20,15 @@ CRD_REF_DOCS="${GOBIN}/crd-ref-docs-v0.2.0" GOJQ="${GOBIN}/gojq-v0.12.17" -GOLANGCI_LINT="${GOBIN}/golangci-lint-v2.6.2" +GOLANGCI_LINT="${GOBIN}/golangci-lint-v2.7.2" -GORELEASER="${GOBIN}/goreleaser-v1.26.2" +GORELEASER="${GOBIN}/goreleaser-v2.11.2" HELM="${GOBIN}/helm-v3.18.4" -KIND="${GOBIN}/kind-v0.30.0" +KIND="${GOBIN}/kind-v0.31.0" + +KUBE_SCORE="${GOBIN}/kube-score-v1.20.0" KUSTOMIZE="${GOBIN}/kustomize-v5.7.1" diff --git a/.claude/commands/api-lint-diff.md b/.claude/commands/api-lint-diff.md new file mode 100644 index 000000000..549809bc4 --- /dev/null +++ b/.claude/commands/api-lint-diff.md @@ -0,0 +1,200 @@ +--- +description: Validate API issues using kube-api-linter with diff-aware analysis +--- + +# API Lint Diff + +Validates API issues in `api/` directory using kube-api-linter with diff-aware analysis that distinguishes between FIXED, NEW, and PRE-EXISTING 
issues.
+
+## Instructions for Claude AI
+
+When this command is invoked, you MUST:
+
+**CRITICAL:** The final output MUST be a comprehensive analysis report displayed directly in the conversation for the user to read. Do NOT just create temp files - output the full report as your response.
+
+1. **Execute the shell script**
+   ```bash
+   bash hack/api-lint-diff/run.sh
+   ```
+
+2. **Understand the shell script's output**:
+   - **False positives (IGNORED)**: Standard CRD scaffolding patterns that kube-api-linter incorrectly flags
+   - **FIXED issues (SUCCESS)**: Issues that existed in baseline but were resolved in current branch → Celebrate! 🎉
+   - **NEW issues (ERRORS)**: Introduced in current branch → MUST fix
+   - **PRE-EXISTING issues (WARNINGS)**: Existed before changes → Can fix separately
+
+3. **Filter false positives** - Operator projects scaffold standard Kubernetes CRD patterns that kube-api-linter incorrectly flags as errors, even with WhenRequired configuration.
+
+   **Scenario 1: optionalfields on Status field**
+   ```go
+   Status MyResourceStatus `json:"status,omitzero"`
+   ```
+   **Error reported:**
+   ```
+   optionalfields: field Status has a valid zero value ({}), but the validation
+   is not complete (e.g. min properties/adding required fields). The field should
+   be a pointer to allow the zero value to be set. If the zero value is not a
+   valid use case, complete the validation and remove the pointer.
+   ```
+   **Why it's a FALSE POSITIVE:**
+   - Status is NEVER a pointer in any Kubernetes API
+   - Works correctly with `omitzero` tag
+   - Validation incompleteness is expected - Status is controller-managed, not user-provided
+   - **ACTION: IGNORE this error**
+
+   **Scenario 2: requiredfields on Spec field**
+   ```go
+   Spec MyResourceSpec `json:"spec"`
+   ```
+   **Error reported:**
+   ```
+   requiredfields: field Spec has a valid zero value ({}), but the validation is
+   not complete (e.g. min properties/adding required fields). The field should be
+   a pointer to allow the zero value to be set. If the zero value is not a valid
+   use case, complete the validation and remove the pointer.
+   ```
+   **Why it's a FALSE POSITIVE:**
+   - Spec is NEVER a pointer in Kubernetes APIs
+   - Scaffolds are starting points - users add validation when they implement their business logic
+   - **ACTION: IGNORE this error**
+
+   **Scenario 3: conditions markers on metav1.Condition**
+   ```go
+   Conditions []metav1.Condition `json:"conditions,omitempty"`
+   ```
+   **Error reported:**
+   ```
+   conditions: Conditions field is missing the following markers:
+   patchStrategy=merge, patchMergeKey=type
+   ```
+   **Why it's a FALSE POSITIVE:**
+   - `metav1.Condition` already handles patches correctly
+   - Adding these markers is redundant for this standard Kubernetes type
+   - **ACTION: IGNORE this error**
+
+4. **For reported issues, provide intelligent analysis**:
+
+   a. **Categorize by fix complexity**:
+      - NON-BREAKING: Marker replacements, adding listType, adding +required/+optional
+      - BREAKING: Pointer conversions, type changes (int→int32)
+
+   b. 
**Search for actual usage** (REQUIRED FOR ALL ISSUES - NOT OPTIONAL): + - **CRITICAL:** Do NOT just look at JSON tags - analyze actual code usage patterns + - **Exception:** Deprecated marker replacements (`+kubebuilder:validation:Required` → `+required`) are straightforward - no usage analysis needed + - **For all other issues:** MUST analyze actual usage before making recommendations + - Use grep to find ALL occurrences where each field is: + * **Read/accessed**: `obj.Spec.FieldName`, `cat.Spec.Priority` + * **Written/set**: `obj.Spec.FieldName = value` + * **Checked for zero/nil**: `if obj.Spec.FieldName == ""`, `if ptr != nil` + * **Used in conditionals**: Understand semantic meaning of zero values + - Search in: controllers, reconcilers, internal packages, tests, examples + - **Smart assessment based on usage patterns**: + * Field ALWAYS set in code? → Should be **required**, no omitempty + * Field SOMETIMES set? → Should be **optional** with omitempty + * Code checks `if field == zero`? → May need **pointer** to distinguish zero vs unset + * Zero value semantically valid? → Keep as value type with omitempty + * Zero value semantically invalid? → Use pointer OR mark required + * Field never read but only set by controller? → Likely Status field + - **Example analysis workflow for a field**: + ``` + 1. Grep for field usage: `CatalogFilter.Version` + 2. Found 5 occurrences: + - controllers/extension.go:123: if filter.Version != "" { ... } + - controllers/extension.go:456: result.Version = bundle.Version + - tests/filter_test.go:89: Version: "1.2.3" + 3. Analysis: Version is checked for empty, sometimes set, sometimes omitted + 4. Recommendation: Optional with omitempty (current usage supports this) + ``` + + c. **Generate EXACT code fixes** grouped by file: + - Show current code + - Show replacement code, ready to copy and paste + - **Explain why based on actual usage analysis** (not just JSON tags): + * Include usage summary: "Found N occurrences" + * Cite specific examples: "Used in resolve/catalog.go:163 as direct int32" + * Explain semantic meaning: "Field distinguishes priority 0 vs unset" + * Justify recommendation: "Since code checks for empty, should be optional" + - Note breaking change impact with reasoning + - **Each fix MUST include evidence from code usage** + + d. **Prioritize recommendations**: + - NEW issues first (must fix) + - Group PRE-EXISTING by NON-BREAKING vs BREAKING + +5. 
**Present actionable report directly to user**:
+   - **IMPORTANT:** Output the full comprehensive analysis in the conversation (not just to a temp file)
+   - Summary: False positives filtered, NEW count, PRE-EXISTING count
+   - Group issues by file and fix type
+   - Provide code snippets ready to apply (current code → fixed code)
+   - **DO NOT include "Next Steps" or "Conclusion" sections** - just present the analysis
+
+   **Report Structure:**
+   ```
+   # API Lint Diff Analysis Report
+
+   **Generated:** [date]
+   **Baseline:** main branch (X issues)
+   **Current:** [branch name] (Y issues)
+   **Status:** [status icon and message based on logic below]
+
+   **Status Logic:**
+   - ✅ PASSED: 0 new issues and 0 pre-existing issues (fixed issues are OK)
+   - ⚠️ WARN: 0 new issues but has pre-existing issues
+   - ❌ FAIL: Has new issues that must be fixed
+
+   ## Executive Summary
+   - Baseline issues: X
+   - Current issues: Y
+   - **FIXED**: F (issues resolved in this branch)
+   - **NEW**: N (issues introduced in this branch)
+   - **PRE-EXISTING**: P (issues that still remain)
+   - False positives (IGNORED): Z
+
+   ## FIXED ISSUES (F issues)
+
+   [List of issues that were fixed in this branch - show the baseline line numbers]
+
+   ## NEW ISSUES (N issues)
+
+   [List of issues introduced in this branch - these MUST be fixed]
+
+   ## PRE-EXISTING ISSUES (P issues)
+
+   [List of issues that existed before and still exist - can be fixed separately]
+
+   ---
+
+   ## DETAILED ANALYSIS FOR ISSUES NEEDING FIXES
+
+   ### Category 1: [Issue Type] (N issues) - [BREAKING/NON-BREAKING]
+
+   #### File: [filename]
+
+   **[Issue #]. Line X - [Field Name]**
+   ```go
+   // CURRENT:
+   [current code]
+
+   // FIX:
+   [fixed code]
+   ```
+   **Usage Analysis:**
+   - Found N occurrences in codebase
+   - [Specific usage example 1]: path/file.go:123
+   - [Specific usage example 2]: path/file.go:456
+   - Pattern: [always set / sometimes set / checked for zero / etc.]
+
+   **Why:** [Recommendation based on usage analysis with evidence]
+   **Breaking:** [YES/NO] ([detailed reason with impact])
+
+   [Repeat for all issues]
+
+   ## Summary of Breaking Changes
+   [Table of breaking changes if any]
+   ```
+
+## Related Documentation
+
+- [Kubernetes API Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md)
+- [kube-api-linter](https://github.com/kubernetes-sigs/kube-api-linter)
+- AGENTS.md in this repository for understanding operator patterns
diff --git a/.goreleaser.yml b/.goreleaser.yml
index 720014214..e2807ca41 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -1,3 +1,4 @@
+version: 2
 before:
   hooks:
     - go mod tidy
@@ -116,7 +117,7 @@ docker_manifests:
 checksum:
   name_template: 'checksums.txt'
 snapshot:
-  name_template: "{{ incpatch .Version }}-next"
+  version_template: "{{ incpatch .Version }}-next"
 changelog:
   use: github-native
   disable: '{{ ne .Env.ENABLE_RELEASE_PIPELINE "true" }}'
diff --git a/.tilt-support b/.tilt-support
index 9cb01b152..dcd827d04 100644
--- a/.tilt-support
+++ b/.tilt-support
@@ -14,7 +14,7 @@ def deploy_cert_manager_if_needed():
 docker_build(
     ref='helper',
     context='.',
-    build_args={'GO_VERSION': '1.24'},
+    build_args={'GO_VERSION': '1.25'},
     dockerfile_contents='''
 ARG GO_VERSION
 FROM golang:${GO_VERSION}
diff --git a/AGENTS.md b/AGENTS.md
index e1a7626f6..07dd2e6be 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -289,7 +289,6 @@ Two manifest variants exist:
 - `/go.mod` - Dependencies (use `make tidy`, avoid Go version bumps without discussion)
 - `/PROJECT` - Kubebuilder project config
 - `/OWNERS` & `/OWNERS_ALIASES` - Maintainer lists
-- `/CODEOWNERS` - Code ownership
 - `/mkdocs.yml` - Documentation site config
 - `/.bingo/*.mod` - Tool dependencies (managed by bingo)
 - `/.golangci.yaml` - Linter configuration
diff --git a/CODEOWNERS b/CODEOWNERS
deleted file mode 100644
index 1578d1c0f..000000000
--- a/CODEOWNERS
+++ /dev/null
@@ -1 +0,0 @@
-* @operator-framework/operator-controller-maintainers
diff --git a/Makefile b/Makefile
index 17025fd9f..36c56488e 100644
--- a/Makefile
+++ b/Makefile
@@ -62,6 +62,9 @@ ifeq ($(origin KIND_CLUSTER_NAME), undefined)
 KIND_CLUSTER_NAME := operator-controller
 endif
+ifeq ($(origin KIND_CONFIG), undefined)
+KIND_CONFIG := ./kind-config/kind-config.yaml
+endif
 
 ifneq (, $(shell command -v docker 2>/dev/null))
 CONTAINER_RUNTIME := docker
@@ -118,9 +121,25 @@ help-extended: #HELP Display extended help.
 lint: lint-custom $(GOLANGCI_LINT) #HELP Run golangci linter.
 	$(GOLANGCI_LINT) run --build-tags $(GO_BUILD_TAGS) $(GOLANGCI_LINT_ARGS)
 
-lint-helm: $(HELM) #HELP Run helm linter
+.PHONY: lint-helm
+lint-helm: $(HELM) $(CONFTEST) #HELP Run helm linter
 	helm lint helm/olmv1
 	helm lint helm/prometheus
+	(set -euo pipefail; helm template olmv1 helm/olmv1; helm template prometheus helm/prometheus) | $(CONFTEST) test --policy hack/conftest/policy/ --combine -n main -n prometheus -
+
+.PHONY: lint-deployed-resources
+lint-deployed-resources: $(KUBE_SCORE) #EXHELP Lint deployed resources.
+ (for ns in $$(printf "olmv1-system\n%s\n" "$(CATD_NAMESPACE)" | uniq); do \ + for resource in $$(kubectl api-resources --verbs=list --namespaced -o name); do \ + kubectl get $$resource -n $$ns -o yaml ; \ + echo "---" ; \ + done \ + done) | $(KUBE_SCORE) score - \ + `# TODO: currently these checks are failing, decide if resources should be fixed for them to pass (https://github.com/operator-framework/operator-controller/issues/2398)` \ + --ignore-test container-resources \ + --ignore-test container-image-pull-policy \ + --ignore-test container-ephemeral-storage-request-and-limit \ + --ignore-test container-security-context-user-group-id .PHONY: custom-linter-build custom-linter-build: #EXHELP Build custom linter @@ -161,9 +180,10 @@ $(EXPERIMENTAL_MANIFEST) ?= helm/cert-manager.yaml helm/experimental.yaml $(EXPERIMENTAL_E2E_MANIFEST) ?= helm/cert-manager.yaml helm/experimental.yaml helm/e2e.yaml HELM_SETTINGS ?= .PHONY: $(MANIFESTS) -$(MANIFESTS): $(HELM) +$(MANIFESTS): $(HELM) $(CONFTEST) @mkdir -p $(MANIFEST_HOME) $(HELM) template olmv1 helm/olmv1 $(addprefix --values ,$($@)) $(addprefix --set ,$(HELM_SETTINGS)) > $@ + $(CONFTEST) test --policy hack/conftest/policy/ -n main --combine $@ # Generate manifests stored in source-control .PHONY: manifests @@ -215,7 +235,7 @@ test: manifests generate fmt lint test-unit test-e2e test-regression #HELP Run a .PHONY: e2e e2e: #EXHELP Run the e2e tests. - go test -count=1 -v ./test/e2e/... + go test -count=1 -v ./test/e2e/features_test.go E2E_REGISTRY_NAME := docker-registry E2E_REGISTRY_NAMESPACE := operator-controller-e2e @@ -266,7 +286,7 @@ image-registry: ## Build the testdata catalog used for e2e tests and push it to # or inject unintended characters into the binary (e.g., version metadata). go build $(GO_BUILD_FLAGS) $(GO_BUILD_EXTRA_FLAGS) -tags '$(GO_BUILD_TAGS)' -ldflags "$(GO_BUILD_LDFLAGS)" -gcflags '$(GO_BUILD_GCFLAGS)' -asmflags '$(GO_BUILD_ASMFLAGS)' -o ./testdata/push/bin/push ./testdata/push/push.go $(CONTAINER_RUNTIME) build -f ./testdata/Dockerfile -t $(E2E_REGISTRY_IMAGE) ./testdata - $(CONTAINER_RUNTIME) save $(E2E_REGISTRY_IMAGE) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME) + $(KIND) load docker-image $(E2E_REGISTRY_IMAGE) --name $(KIND_CLUSTER_NAME) ./testdata/build-test-registry.sh $(E2E_REGISTRY_NAMESPACE) $(E2E_REGISTRY_NAME) $(E2E_REGISTRY_IMAGE) # When running the e2e suite, you can set the ARTIFACT_PATH variable to the absolute path @@ -285,6 +305,7 @@ test-e2e: run-internal image-registry prometheus e2e e2e-coverage kind-clean #HE .PHONY: test-experimental-e2e test-experimental-e2e: SOURCE_MANIFEST := $(EXPERIMENTAL_E2E_MANIFEST) test-experimental-e2e: KIND_CLUSTER_NAME := operator-controller-e2e +test-experimental-e2e: KIND_CONFIG := ./kind-config/kind-config-2node.yaml test-experimental-e2e: GO_BUILD_EXTRA_FLAGS := -cover test-experimental-e2e: COVERAGE_NAME := experimental-e2e test-experimental-e2e: export MANIFEST := $(EXPERIMENTAL_RELEASE_MANIFEST) @@ -385,8 +406,8 @@ stop-profiling: build-test-profiler #EXHELP Stop profiling and generate analysis .PHONY: kind-load kind-load: $(KIND) #EXHELP Loads the currently constructed images into the KIND cluster. 
- $(CONTAINER_RUNTIME) save $(OPCON_IMG) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME) - $(CONTAINER_RUNTIME) save $(CATD_IMG) | $(KIND) load image-archive /dev/stdin --name $(KIND_CLUSTER_NAME) + $(KIND) load docker-image $(OPCON_IMG) --name $(KIND_CLUSTER_NAME) + $(KIND) load docker-image $(CATD_IMG) --name $(KIND_CLUSTER_NAME) .PHONY: kind-deploy kind-deploy: export DEFAULT_CATALOG := $(RELEASE_CATALOGS) @@ -411,8 +432,9 @@ kind-deploy-experimental: manifests .PHONY: kind-cluster kind-cluster: $(KIND) kind-verify-versions #EXHELP Standup a kind cluster. -$(KIND) delete cluster --name $(KIND_CLUSTER_NAME) - $(KIND) create cluster --name $(KIND_CLUSTER_NAME) --config ./kind-config.yaml + $(KIND) create cluster --name $(KIND_CLUSTER_NAME) --config $(KIND_CONFIG) $(KIND) export kubeconfig --name $(KIND_CLUSTER_NAME) + kubectl wait --for=condition=Ready nodes --all --timeout=2m .PHONY: kind-clean kind-clean: $(KIND) #EXHELP Delete the kind cluster. @@ -475,7 +497,7 @@ go-build-linux: export GOARCH=amd64 go-build-linux: $(BINARIES) .PHONY: run-internal -run-internal: docker-build kind-cluster kind-load kind-deploy wait +run-internal: docker-build kind-cluster kind-load kind-deploy lint-deployed-resources wait .PHONY: run run: SOURCE_MANIFEST := $(STANDARD_MANIFEST) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 3d1381689..c2c73d7e8 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -1,5 +1,6 @@ aliases: olmv1-approvers: + - camilamacedo86 - grokspawn - joelanford - kevinrizza @@ -27,14 +28,10 @@ aliases: api-approvers: - grokspawn - - thetechnick catalogd-approvers: - grokspawn - operator-controller-approvers: - - thetechnick - cmd-approvers: - grokspawn @@ -50,4 +47,3 @@ aliases: docs-draft-approvers: - camilamacedo86 - grokspawn - - thetechnick diff --git a/api/v1/clustercatalog_types.go b/api/v1/clustercatalog_types.go index c18fa3c7e..fbd9a2dce 100644 --- a/api/v1/clustercatalog_types.go +++ b/api/v1/clustercatalog_types.go @@ -51,7 +51,7 @@ const ( //+kubebuilder:printcolumn:name="Serving",type=string,JSONPath=`.status.conditions[?(@.type=="Serving")].status` //+kubebuilder:printcolumn:name=Age,type=date,JSONPath=`.metadata.creationTimestamp` -// ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. +// ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. // For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs type ClusterCatalog struct { metav1.TypeMeta `json:",inline"` @@ -60,16 +60,14 @@ type ClusterCatalog struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` - // spec is the desired state of the ClusterCatalog. - // spec is required. - // The controller will work to ensure that the desired - // catalog is unpacked and served over the catalog content HTTP server. + // spec is a required field that defines the desired state of the ClusterCatalog. + // The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. 
// +kubebuilder:validation:Required Spec ClusterCatalogSpec `json:"spec"` - // status contains information about the state of the ClusterCatalog such as: - // - Whether or not the catalog contents are being served via the catalog content HTTP server - // - Whether or not the ClusterCatalog is progressing to a new state + // status contains the following information about the state of the ClusterCatalog: + // - Whether the catalog contents are being served via the catalog content HTTP server + // - Whether the ClusterCatalog is progressing to a new state // - A reference to the source from which the catalog contents were retrieved // +optional Status ClusterCatalogStatus `json:"status,omitempty"` @@ -93,15 +91,12 @@ type ClusterCatalogList struct { // ClusterCatalogSpec defines the desired state of ClusterCatalog type ClusterCatalogSpec struct { - // source allows a user to define the source of a catalog. - // A "catalog" contains information on content that can be installed on a cluster. - // Providing a catalog source makes the contents of the catalog discoverable and usable by - // other on-cluster components. - // These on-cluster components may do a variety of things with this information, such as - // presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + // source is a required field that defines the source of a catalog. + // A catalog contains information on content that can be installed on a cluster. + // The catalog source makes catalog contents discoverable and usable by other on-cluster components. + // These components can present the content in a GUI dashboard or install content from the catalog on the cluster. // The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. // For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - // source is a required field. // // Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: // @@ -113,44 +108,40 @@ type ClusterCatalogSpec struct { // +kubebuilder:validation:Required Source CatalogSource `json:"source"` - // priority allows the user to define a priority for a ClusterCatalog. - // priority is optional. + // priority is an optional field that defines a priority for this ClusterCatalog. // - // A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - // A higher number means higher priority. + // Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + // Higher numbers mean higher priority. // - // It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - // When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + // Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + // Clients should prompt users for additional input to break the tie. // - // When omitted, the default priority is 0 because that is the zero value of integers. + // When omitted, the default priority is 0. // - // Negative numbers can be used to specify a priority lower than the default. - // Positive numbers can be used to specify a priority higher than the default. + // Use negative numbers to specify a priority lower than the default. 
+ // Use positive numbers to specify a priority higher than the default. // // The lowest possible value is -2147483648. // The highest possible value is 2147483647. // // +kubebuilder:default:=0 - // +kubebuilder:validation:minimum:=-2147483648 - // +kubebuilder:validation:maximum:=2147483647 + // +kubebuilder:validation:Minimum:=-2147483648 + // +kubebuilder:validation:Maximum:=2147483647 // +optional Priority int32 `json:"priority"` - // availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - // availabilityMode is optional. + // availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. // - // Allowed values are "Available" and "Unavailable" and omitted. + // Allowed values are "Available", "Unavailable", or omitted. // // When omitted, the default value is "Available". // - // When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - // Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - // and its contents as usable. + // When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + // Clients should consider this ClusterCatalog and its contents as usable. // - // When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - // When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - // Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - // to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + // When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + // Treat this the same as if the ClusterCatalog does not exist. + // Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. // // +kubebuilder:validation:Enum:="Unavailable";"Available" // +kubebuilder:default:="Available" @@ -160,24 +151,23 @@ type ClusterCatalogSpec struct { // ClusterCatalogStatus defines the observed state of ClusterCatalog type ClusterCatalogStatus struct { - // conditions is a representation of the current state for this ClusterCatalog. + // conditions represents the current state of this ClusterCatalog. // // The current condition types are Serving and Progressing. // - // The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - // When it has a status of True and a reason of Available, the contents of the catalog are being served. - // When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - // When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + // The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + // - When status is True and reason is Available, the catalog contents are being served. + // - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. 
+ // - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. // - // The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - // When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - // When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - // When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + // The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + // - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + // - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + // - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. // - // In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - // catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - // contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - // to the contents we identify that there are updates to the contents. + // If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + // - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + // - The Progressing condition is True with reason Retrying because the system is working to serve the new version. // // +listType=map // +listMapKey=type @@ -189,30 +179,25 @@ type ClusterCatalogStatus struct { // urls contains the URLs that can be used to access the catalog. // +optional URLs *ClusterCatalogURLs `json:"urls,omitempty"` - // lastUnpacked represents the last time the contents of the - // catalog were extracted from their source format. As an example, - // when using an Image source, the OCI image will be pulled and the - // image layers written to a file-system backed cache. We refer to the - // act of this extraction from the source format as "unpacking". + // lastUnpacked represents the last time the catalog contents were extracted from their source format. + // For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + // This extraction from the source format is called "unpacking". // +optional LastUnpacked *metav1.Time `json:"lastUnpacked,omitempty"` } // ClusterCatalogURLs contains the URLs that can be used to access the catalog. type ClusterCatalogURLs struct { - // base is a cluster-internal URL that provides endpoints for - // accessing the content of the catalog. + // base is a cluster-internal URL that provides endpoints for accessing the catalog content. 
// - // It is expected that clients append the path for the endpoint they wish - // to access. + // Clients should append the path for the endpoint they want to access. // - // Currently, only a single endpoint is served and is accessible at the path - // /api/v1. + // Currently, only a single endpoint is served and is accessible at the path /api/v1. // // The endpoints served for the v1 API are: - // - /all - this endpoint returns the entirety of the catalog contents in the FBC format + // - /all - this endpoint returns the entire catalog contents in the FBC format // - // As the needs of users and clients of the evolve, new endpoints may be added. + // New endpoints may be added as needs evolve. // // +kubebuilder:validation:Required // +kubebuilder:validation:MaxLength:=525 @@ -226,20 +211,19 @@ type ClusterCatalogURLs struct { // +union // +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Image' ? has(self.image) : !has(self.image)",message="image is required when source type is Image, and forbidden otherwise" type CatalogSource struct { - // type is a reference to the type of source the catalog is sourced from. - // type is required. + // type is a required field that specifies the type of source for the catalog. // // The only allowed value is "Image". // - // When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + // When set to "Image", the ClusterCatalog content is sourced from an OCI image. // When using an image source, the image field must be set and must be the only field defined for this type. // // +unionDiscriminator // +kubebuilder:validation:Enum:="Image" // +kubebuilder:validation:Required Type SourceType `json:"type"` - // image is used to configure how catalog contents are sourced from an OCI image. - // This field is required when type is Image, and forbidden otherwise. + // image configures how catalog contents are sourced from an OCI image. + // It is required when type is Image, and forbidden otherwise. // +optional Image *ImageSource `json:"image,omitempty"` } @@ -249,27 +233,26 @@ type CatalogSource struct { // +union // +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Image' ? has(self.image) : !has(self.image)",message="image is required when source type is Image, and forbidden otherwise" type ResolvedCatalogSource struct { - // type is a reference to the type of source the catalog is sourced from. - // type is required. + // type is a required field that specifies the type of source for the catalog. // // The only allowed value is "Image". // - // When set to "Image", information about the resolved image source will be set in the 'image' field. + // When set to "Image", information about the resolved image source is set in the image field. // // +unionDiscriminator // +kubebuilder:validation:Enum:="Image" // +kubebuilder:validation:Required Type SourceType `json:"type"` - // image is a field containing resolution information for a catalog sourced from an image. - // This field must be set when type is Image, and forbidden otherwise. + // image contains resolution information for a catalog sourced from an image. + // It must be set when type is Image, and forbidden otherwise. Image *ResolvedImageSource `json:"image"` } // ResolvedImageSource provides information about the resolved source of a Catalog sourced from an image. type ResolvedImageSource struct { // ref contains the resolved image digest-based reference. 
- // The digest format is used so users can use other tooling to fetch the exact - // OCI manifests that were used to extract the catalog contents. + // The digest format allows you to use other tooling to fetch the exact OCI manifests + // that were used to extract the catalog contents. // +kubebuilder:validation:Required // +kubebuilder:validation:MaxLength:=1000 // +kubebuilder:validation:XValidation:rule="self.matches('^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])((\\\\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(:[0-9]+)?\\\\b')",message="must start with a valid domain. valid domains must be alphanumeric characters (lowercase and uppercase) separated by the \".\" character." @@ -287,11 +270,10 @@ type ResolvedImageSource struct { // reject the resource since there is no use in polling a digest-based image reference. // +kubebuilder:validation:XValidation:rule="self.ref.find('(@.*:)') != \"\" ? !has(self.pollIntervalMinutes) : true",message="cannot specify pollIntervalMinutes while using digest-based image" type ImageSource struct { - // ref allows users to define the reference to a container image containing Catalog contents. - // ref is required. - // ref can not be more than 1000 characters. + // ref is a required field that defines the reference to a container image containing catalog contents. + // It cannot be more than 1000 characters. // - // A reference can be broken down into 3 parts - the domain, name, and identifier. + // A reference has 3 parts: the domain, name, and identifier. // // The domain is typically the registry where an image is located. // It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -337,11 +319,10 @@ type ImageSource struct { // +kubebuilder:validation:XValidation:rule="self.find('(@.*:)') != \"\" ? self.find(':.*$').matches(':[0-9A-Fa-f]*$') : true",message="digest is not valid. the encoded string must only contain hex characters (A-F, a-f, 0-9)" Ref string `json:"ref"` - // pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - // pollIntervalMinutes is optional. - // pollIntervalMinutes can not be specified when ref is a digest-based reference. + // pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + // You cannot specify pollIntervalMinutes when ref is a digest-based reference. // - // When omitted, the image will not be polled for new content. + // When omitted, the image is not polled for new content. // +kubebuilder:validation:Minimum:=1 // +optional PollIntervalMinutes *int `json:"pollIntervalMinutes,omitempty"` diff --git a/api/v1/clusterextension_types.go b/api/v1/clusterextension_types.go index fb82b7a23..2846b24c6 100644 --- a/api/v1/clusterextension_types.go +++ b/api/v1/clusterextension_types.go @@ -48,16 +48,15 @@ const ( // ClusterExtensionSpec defines the desired state of ClusterExtension type ClusterExtensionSpec struct { - // namespace is a reference to a Kubernetes namespace. - // This is the namespace in which the provided ServiceAccount must exist. - // It also designates the default namespace where namespace-scoped resources - // for the extension are applied to the cluster. + // namespace specifies a Kubernetes namespace. + // This is the namespace where the provided ServiceAccount must exist. + // It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. 
// Some extensions may contain namespace-scoped resources to be applied in other namespaces. // This namespace must exist. // - // namespace is required, immutable, and follows the DNS label standard - // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - // start and end with an alphanumeric character, and be no longer than 63 characters + // The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + // It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + // and be no longer than 63 characters. // // [RFC 1123]: https://tools.ietf.org/html/rfc1123 // @@ -67,20 +66,20 @@ type ClusterExtensionSpec struct { // +kubebuilder:validation:Required Namespace string `json:"namespace"` - // serviceAccount is a reference to a ServiceAccount used to perform all interactions - // with the cluster that are required to manage the extension. + // serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + // that are required to manage the extension. // The ServiceAccount must be configured with the necessary permissions to perform these interactions. // The ServiceAccount must exist in the namespace referenced in the spec. - // serviceAccount is required. + // The serviceAccount field is required. // // +kubebuilder:validation:Required ServiceAccount ServiceAccountReference `json:"serviceAccount"` - // source is a required field which selects the installation source of content - // for this ClusterExtension. Selection is performed by setting the sourceType. + // source is required and selects the installation source of content for this ClusterExtension. + // Set the sourceType field to perform the selection. // - // Catalog is currently the only implemented sourceType, and setting the - // sourcetype to "Catalog" requires the catalog field to also be defined. + // Catalog is currently the only implemented sourceType. + // Setting sourceType to "Catalog" requires the catalog field to also be defined. // // Below is a minimal example of a source definition (in yaml): // @@ -92,21 +91,20 @@ type ClusterExtensionSpec struct { // +kubebuilder:validation:Required Source SourceConfig `json:"source"` - // install is an optional field used to configure the installation options - // for the ClusterExtension such as the pre-flight check configuration. + // install is optional and configures installation options for the ClusterExtension, + // such as the pre-flight check configuration. // // +optional Install *ClusterExtensionInstallConfig `json:"install,omitempty"` - // config is an optional field used to specify bundle specific configuration - // used to configure the bundle. Configuration is bundle specific and a bundle may provide - // a configuration schema. When not specified, the default configuration of the resolved bundle will be used. + // config is optional and specifies bundle-specific configuration. + // Configuration is bundle-specific and a bundle may provide a configuration schema. + // When not specified, the default configuration of the resolved bundle is used. // // config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide // a configuration schema the bundle is deemed to not be configurable. More information on how // to configure bundles can be found in the OLM documentation associated with your current OLM version. 
// - // // +optional Config *ClusterExtensionConfig `json:"config,omitempty"` } @@ -118,13 +116,12 @@ const SourceTypeCatalog = "Catalog" // +union // +kubebuilder:validation:XValidation:rule="has(self.sourceType) && self.sourceType == 'Catalog' ? has(self.catalog) : !has(self.catalog)",message="catalog is required when sourceType is Catalog, and forbidden otherwise" type SourceConfig struct { - // sourceType is a required reference to the type of install source. + // sourceType is required and specifies the type of install source. // - // Allowed values are "Catalog" + // The only allowed value is "Catalog". // - // When this field is set to "Catalog", information for determining the - // appropriate bundle of content to install will be fetched from - // ClusterCatalog resources existing on the cluster. + // When set to "Catalog", information for determining the appropriate bundle of content to install + // is fetched from ClusterCatalog resources on the cluster. // When using the Catalog sourceType, the catalog field must also be set. // // +unionDiscriminator @@ -132,8 +129,8 @@ type SourceConfig struct { // +kubebuilder:validation:Required SourceType string `json:"sourceType"` - // catalog is used to configure how information is sourced from a catalog. - // This field is required when sourceType is "Catalog", and forbidden otherwise. + // catalog configures how information is sourced from a catalog. + // It is required when sourceType is "Catalog", and forbidden otherwise. // // +optional Catalog *CatalogFilter `json:"catalog,omitempty"` @@ -145,11 +142,11 @@ type SourceConfig struct { // +kubebuilder:validation:XValidation:rule="has(self.preflight)",message="at least one of [preflight] are required when install is specified" // +union type ClusterExtensionInstallConfig struct { - // preflight is an optional field that can be used to configure the checks that are - // run before installation or upgrade of the content for the package specified in the packageName field. + // preflight is optional and configures the checks that run before installation or upgrade + // of the content for the package specified in the packageName field. // // When specified, it replaces the default preflight configuration for install/upgrade actions. - // When not specified, the default configuration will be used. + // When not specified, the default configuration is used. // // +optional Preflight *PreflightConfig `json:"preflight,omitempty"` @@ -161,22 +158,20 @@ type ClusterExtensionInstallConfig struct { // +kubebuilder:validation:XValidation:rule="has(self.configType) && self.configType == 'Inline' ?has(self.inline) : !has(self.inline)",message="inline is required when configType is Inline, and forbidden otherwise" // +union type ClusterExtensionConfig struct { - // configType is a required reference to the type of configuration source. + // configType is required and specifies the type of configuration source. // - // Allowed values are "Inline" + // The only allowed value is "Inline". // - // When this field is set to "Inline", the cluster extension configuration is defined inline within the - // ClusterExtension resource. + // When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. // // +unionDiscriminator // +kubebuilder:validation:Enum:="Inline" // +kubebuilder:validation:Required ConfigType ClusterExtensionConfigType `json:"configType"` - // inline contains JSON or YAML values specified directly in the - // ClusterExtension. 
+ // inline contains JSON or YAML values specified directly in the ClusterExtension. // - // inline is used to specify arbitrary configuration values for the ClusterExtension. + // It is used to specify arbitrary configuration values for the ClusterExtension. // It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. // The configuration values are validated at runtime against a JSON schema provided by the bundle. // @@ -189,13 +184,12 @@ type ClusterExtensionConfig struct { // CatalogFilter defines the attributes used to identify and filter content from a catalog. type CatalogFilter struct { - // packageName is a reference to the name of the package to be installed - // and is used to filter the content from catalogs. + // packageName specifies the name of the package to be installed and is used to filter + // the content from catalogs. // - // packageName is required, immutable, and follows the DNS subdomain standard - // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - // hyphens (-) or periods (.), start and end with an alphanumeric character, - // and be no longer than 253 characters. + // It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + // start and end with an alphanumeric character, and be no longer than 253 characters. // // Some examples of valid values are: // - some-package @@ -218,12 +212,13 @@ type CatalogFilter struct { // +kubebuilder:validation:Required PackageName string `json:"packageName"` - // version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + // version is an optional semver constraint (a specific version or range of versions). + // When unspecified, the latest version available is installed. // // Acceptable version ranges are no longer than 64 characters. - // Version ranges are composed of comma- or space-delimited values and one or - // more comparison operators, known as comparison strings. Additional - // comparison strings can be added using the OR operator (||). + // Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + // known as comparison strings. + // You can add additional comparison strings using the OR operator (||). // // # Range Comparisons // @@ -297,25 +292,24 @@ type CatalogFilter struct { // +optional Version string `json:"version,omitempty"` - // channels is an optional reference to a set of channels belonging to - // the package specified in the packageName field. + // channels is optional and specifies a set of channels belonging to the package + // specified in the packageName field. // - // A "channel" is a package-author-defined stream of updates for an extension. + // A channel is a package-author-defined stream of updates for an extension. // - // Each channel in the list must follow the DNS subdomain standard - // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - // hyphens (-) or periods (.), start and end with an alphanumeric character, - // and be no longer than 253 characters. No more than 256 channels can be specified. + // Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. 
+ // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + // start and end with an alphanumeric character, and be no longer than 253 characters. + // You can specify no more than 256 channels. // - // When specified, it is used to constrain the set of installable bundles and - // the automated upgrade path. This constraint is an AND operation with the - // version field. For example: + // When specified, it constrains the set of installable bundles and the automated upgrade path. + // This constraint is an AND operation with the version field. For example: // - Given channel is set to "foo" // - Given version is set to ">=1.0.0, <1.5.0" - // - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - // - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + // - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + // - Automatic upgrades are constrained to upgrade edges defined by the selected channel // - // When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + // When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. // // Some examples of valid values are: // - 1.1.x @@ -342,33 +336,28 @@ type CatalogFilter struct { // +optional Channels []string `json:"channels,omitempty"` - // selector is an optional field that can be used - // to filter the set of ClusterCatalogs used in the bundle - // selection process. + // selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. // - // When unspecified, all ClusterCatalogs will be used in - // the bundle selection process. + // When unspecified, all ClusterCatalogs are used in the bundle selection process. // // +optional Selector *metav1.LabelSelector `json:"selector,omitempty"` - // upgradeConstraintPolicy is an optional field that controls whether - // the upgrade path(s) defined in the catalog are enforced for the package - // referenced in the packageName field. + // upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + // are enforced for the package referenced in the packageName field. // - // Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + // Allowed values are "CatalogProvided", "SelfCertified", or omitted. // - // When this field is set to "CatalogProvided", automatic upgrades will only occur - // when upgrade constraints specified by the package author are met. + // When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + // author are met. // - // When this field is set to "SelfCertified", the upgrade constraints specified by - // the package author are ignored. This allows for upgrades and downgrades to - // any version of the package. This is considered a dangerous operation as it - // can lead to unknown and potentially disastrous outcomes, such as data - // loss. It is assumed that users have independently verified changes when - // using this option. + // When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + // This allows upgrades and downgrades to any version of the package. + // This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + // such as data loss. 
+	// Use this option only if you have independently verified the changes.
+	//
-	// When this field is omitted, the default value is "CatalogProvided".
+	// When omitted, the default value is "CatalogProvided".
 	//
 	// +kubebuilder:validation:Enum:=CatalogProvided;SelfCertified
 	// +kubebuilder:default:=CatalogProvided
@@ -378,16 +367,14 @@ type CatalogFilter struct {
 
 // ServiceAccountReference identifies the serviceAccount used to install a ClusterExtension.
 type ServiceAccountReference struct {
-	// name is a required, immutable reference to the name of the ServiceAccount
-	// to be used for installation and management of the content for the package
-	// specified in the packageName field.
+	// name is a required, immutable reference to the name of the ServiceAccount used for installation
+	// and management of the content for the package specified in the packageName field.
 	//
 	// This ServiceAccount must exist in the installNamespace.
 	//
-	// name follows the DNS subdomain standard as defined in [RFC 1123].
-	// It must contain only lowercase alphanumeric characters,
-	// hyphens (-) or periods (.), start and end with an alphanumeric character,
-	// and be no longer than 253 characters.
+	// The name field follows the DNS subdomain standard as defined in [RFC 1123].
+	// It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
+	// start and end with an alphanumeric character, and be no longer than 253 characters.
 	//
 	// Some examples of valid values are:
 	// - some-serviceaccount
@@ -413,26 +400,24 @@ type ServiceAccountReference struct {
 //
 // +kubebuilder:validation:XValidation:rule="has(self.crdUpgradeSafety)",message="at least one of [crdUpgradeSafety] are required when preflight is specified"
 type PreflightConfig struct {
-	// crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
-	// checks that run prior to upgrades of installed content.
+	// crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
+	// before upgrades of installed content.
 	//
-	// The CRD Upgrade Safety pre-flight check safeguards from unintended
-	// consequences of upgrading a CRD, such as data loss.
+	// The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
+	// such as data loss.
 	CRDUpgradeSafety *CRDUpgradeSafetyPreflightConfig `json:"crdUpgradeSafety"`
 }
 
 // CRDUpgradeSafetyPreflightConfig is the configuration for CRD upgrade safety preflight check.
 type CRDUpgradeSafetyPreflightConfig struct {
-	// enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
+	// enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
 	//
 	// Allowed values are "None" or "Strict". The default value is "Strict".
 	//
-	// When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
-	// when performing an upgrade operation. This should be used with caution as
-	// unintended consequences such as data loss can occur.
+	// When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
+	// Use this option with caution as unintended consequences such as data loss can occur.
 	//
-	// When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
-	// performing an upgrade operation.
+	// When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation.
// // +kubebuilder:validation:Enum:="None";"Strict" // +kubebuilder:validation:Required @@ -455,17 +440,16 @@ const ( // BundleMetadata is a representation of the identifying attributes of a bundle. type BundleMetadata struct { - // name is required and follows the DNS subdomain standard - // as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - // hyphens (-) or periods (.), start and end with an alphanumeric character, - // and be no longer than 253 characters. + // name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + // It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + // start and end with an alphanumeric character, and be no longer than 253 characters. // // +kubebuilder:validation:Required // +kubebuilder:validation:XValidation:rule="self.matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="packageName must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters" Name string `json:"name"` - // version is a required field and is a reference to the version that this bundle represents - // version follows the semantic versioning standard as defined in https://semver.org/. + // version is required and references the version that this bundle represents. + // It follows the semantic versioning standard as defined in https://semver.org/. // // +kubebuilder:validation:Required // +kubebuilder:validation:XValidation:rule="self.matches(\"^([0-9]+)(\\\\.[0-9]+)?(\\\\.[0-9]+)?(-([-0-9A-Za-z]+(\\\\.[-0-9A-Za-z]+)*))?(\\\\+([-0-9A-Za-z]+(-\\\\.[-0-9A-Za-z]+)*))?\")",message="version must be well-formed semver" @@ -489,11 +473,13 @@ type RevisionStatus struct { // ClusterExtensionStatus defines the observed state of a ClusterExtension. type ClusterExtensionStatus struct { + // conditions represents the current state of the ClusterExtension. + // // The set of condition types which apply to all spec.source variations are Installed and Progressing. // - // The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - // When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - // When Installed is False and the Reason is Failed, the bundle has failed to install. + // The Installed condition represents whether the bundle has been installed for this ClusterExtension: + // - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + // - When Installed is False and the Reason is Failed, the bundle has failed to install. // // The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. // When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. @@ -503,12 +489,12 @@ type ClusterExtensionStatus struct { // When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. // // - // When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - // These are indications from a package owner to guide users away from a particular package, channel, or bundle. - // BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. 
- // ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - // PackageDeprecated is set if the requested package is marked deprecated in the catalog. - // Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + // When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + // These are indications from a package owner to guide users away from a particular package, channel, or bundle: + // - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + // - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + // - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + // - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. // // +listType=map // +listMapKey=type @@ -531,10 +517,10 @@ type ClusterExtensionStatus struct { // ClusterExtensionInstallStatus is a representation of the status of the identified bundle. type ClusterExtensionInstallStatus struct { - // bundle is a required field which represents the identifying attributes of a bundle. + // bundle is required and represents the identifying attributes of a bundle. // - // A "bundle" is a versioned set of content that represents the resources that - // need to be applied to a cluster to install a package. + // A "bundle" is a versioned set of content that represents the resources that need to be applied + // to a cluster to install a package. // // +kubebuilder:validation:Required Bundle BundleMetadata `json:"bundle"` @@ -551,7 +537,11 @@ type ClusterExtensionInstallStatus struct { // ClusterExtension is the Schema for the clusterextensions API type ClusterExtension struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // spec is an optional field that defines the desired state of the ClusterExtension. diff --git a/api/v1/clusterextensionrevision_types.go b/api/v1/clusterextensionrevision_types.go index 368a8fca8..f7e7ff064 100644 --- a/api/v1/clusterextensionrevision_types.go +++ b/api/v1/clusterextensionrevision_types.go @@ -215,7 +215,11 @@ type ClusterExtensionRevisionStatus struct { // or reconfigured. Once the latest revision has rolled out successfully, previous active revisions are archived for // posterity. type ClusterExtensionRevision struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // spec defines the desired state of the ClusterExtensionRevision. 
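Editor's note: the godoc rewrites above describe several behaviors that are easier to see together: priority acts as a client-side tie-breaker, the catalog source is a CEL-validated union in which image must be set exactly when type is "Image", and a catalog filter's version and channels constraints are ANDed. Below is a minimal illustrative Go sketch, not part of this diff, that constructs both resources. It assumes the `github.com/operator-framework/operator-controller/api/v1` import path implied by the file paths above, the `"Image"` source-type value, and a `Spec` field on ClusterExtension; the remaining field names appear in the hunks.

```go
// Illustrative sketch only. Assumptions: the import path below, the "Image"
// source-type value, and ClusterExtension exposing its spec as the field
// Spec. Other field names are taken from the hunks in this diff.
package sketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	ocv1 "github.com/operator-framework/operator-controller/api/v1" // assumed import path
)

// exampleObjects builds one ClusterCatalog and one ClusterExtension that
// exercise the fields whose godoc this PR rewrites.
func exampleObjects() (ocv1.ClusterCatalog, ocv1.ClusterExtension) {
	pollInterval := 10 // minutes; forbidden when Ref is digest-based

	catalog := ocv1.ClusterCatalog{
		ObjectMeta: metav1.ObjectMeta{Name: "operatorhubio"},
		Spec: ocv1.ClusterCatalogSpec{
			// Clients use priority as a tie-breaker; higher wins.
			Priority: 100,
			Source: ocv1.CatalogSource{
				// "Image" is the only allowed source type; the CEL rule
				// requires image to be set exactly when type is Image.
				Type: "Image",
				Image: &ocv1.ImageSource{
					Ref:                 "quay.io/operatorhubio/catalog:latest",
					PollIntervalMinutes: &pollInterval,
				},
			},
		},
	}

	extension := ocv1.ClusterExtension{
		ObjectMeta: metav1.ObjectMeta{Name: "argocd"},
		Spec: ocv1.ClusterExtensionSpec{
			// Default namespace for the extension's namespace-scoped
			// resources; the ServiceAccount below must exist in it.
			Namespace:      "argocd-system",
			ServiceAccount: ocv1.ServiceAccountReference{Name: "argocd-installer"},
			Source: ocv1.SourceConfig{
				SourceType: ocv1.SourceTypeCatalog,
				Catalog: &ocv1.CatalogFilter{
					PackageName: "argocd-operator",
					// version AND channels: only bundles in "stable" that
					// also satisfy the range are installable.
					Version:  ">=0.6.0, <1.0.0",
					Channels: []string{"stable"},
				},
			},
		},
	}
	return catalog, extension
}
```

Setting pollIntervalMinutes together with a digest-based ref would be rejected by the XValidation rule quoted in the ImageSource hunk, which is why the sketch uses a tag-based reference.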
diff --git a/commitchecker.yaml b/commitchecker.yaml index 883a4f9e0..4e6f23ca4 100644 --- a/commitchecker.yaml +++ b/commitchecker.yaml @@ -1,4 +1,4 @@ -expectedMergeBase: 39718ba9a077b3e95ff7e69cc8e6bef5a8815541 +expectedMergeBase: 9d8fda0a1cad37c5e04eea413650e39cfeb67cd6 upstreamBranch: main upstreamOrg: operator-framework upstreamRepo: operator-controller diff --git a/docs/api-reference/olmv1-api-reference.md b/docs/api-reference/olmv1-api-reference.md index 4fc8cb3c6..4feb0d632 100644 --- a/docs/api-reference/olmv1-api-reference.md +++ b/docs/api-reference/olmv1-api-reference.md @@ -46,8 +46,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `name` _string_ | name is required and follows the DNS subdomain standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters. | | Required: \{\}
| -| `version` _string_ | version is a required field and is a reference to the version that this bundle represents
version follows the semantic versioning standard as defined in https://semver.org/. | | Required: \{\}
| +| `name` _string_ | name is required and follows the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters. | | Required: \{\}
| +| `version` _string_ | version is required and references the version that this bundle represents.
It follows the semantic versioning standard as defined in https://semver.org/. | | Required: \{\}
| #### CRDUpgradeSafetyEnforcement @@ -80,7 +80,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `enforcement` _[CRDUpgradeSafetyEnforcement](#crdupgradesafetyenforcement)_ | enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
When set to "None", the CRD Upgrade Safety pre-flight check will be skipped
when performing an upgrade operation. This should be used with caution as
unintended consequences such as data loss can occur.
When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when
performing an upgrade operation. | | Enum: [None Strict]
Required: \{\}
| +| `enforcement` _[CRDUpgradeSafetyEnforcement](#crdupgradesafetyenforcement)_ | enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check.
Allowed values are "None" or "Strict". The default value is "Strict".
When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation.
Use this option with caution as unintended consequences such as data loss can occur.
When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. | | Enum: [None Strict]
Required: \{\}
| #### CatalogFilter @@ -96,11 +96,11 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `packageName` _string_ | packageName is a reference to the name of the package to be installed
and is used to filter the content from catalogs.
packageName is required, immutable, and follows the DNS subdomain standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters.
Some examples of valid values are:
- some-package
- 123-package
- 1-package-2
- somepackage
Some examples of invalid values are:
- -some-package
- some-package-
- thisisareallylongpackagenamethatisgreaterthanthemaximumlength
- some.package
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
| -| `version` _string_ | version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed.
Acceptable version ranges are no longer than 64 characters.
Version ranges are composed of comma- or space-delimited values and one or
more comparison operators, known as comparison strings. Additional
comparison strings can be added using the OR operator (\|\|).
# Range Comparisons
To specify a version range, you can use a comparison string like ">=3.0,
<3.6". When specifying a range, automatic updates will occur within that
range. The example comparison string means "install any version greater than
or equal to 3.0.0 but less than 3.6.0.". It also states intent that if any
upgrades are available within the version range after initial installation,
those upgrades should be automatically performed.
# Pinned Versions
To specify an exact version to install you can use a version range that
"pins" to a specific version. When pinning to a specific version, no
automatic updates will occur. An example of a pinned version range is
"0.6.0", which means "only install version 0.6.0 and never
upgrade from this version".
# Basic Comparison Operators
The basic comparison operators and their meanings are:
- "=", equal (not aliased to an operator)
- "!=", not equal
- "<", less than
- ">", greater than
- ">=", greater than OR equal to
- "<=", less than OR equal to
# Wildcard Comparisons
You can use the "x", "X", and "*" characters as wildcard characters in all
comparison operations. Some examples of using the wildcard characters:
- "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0"
- ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0"
- "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3"
- "x", "X", and "*" is equivalent to ">= 0.0.0"
# Patch Release Comparisons
When you want to specify a minor version up to the next major version you
can use the "~" character to perform patch comparisons. Some examples:
- "~1.2.3" is equivalent to ">=1.2.3, <1.3.0"
- "~1" and "~1.x" is equivalent to ">=1, <2"
- "~2.3" is equivalent to ">=2.3, <2.4"
- "~1.2.x" is equivalent to ">=1.2.0, <1.3.0"
# Major Release Comparisons
You can use the "^" character to make major release comparisons after a
stable 1.0.0 version is published. If there is no stable version published, // minor versions define the stability level. Some examples:
- "^1.2.3" is equivalent to ">=1.2.3, <2.0.0"
- "^1.2.x" is equivalent to ">=1.2.0, <2.0.0"
- "^2.3" is equivalent to ">=2.3, <3"
- "^2.x" is equivalent to ">=2.0.0, <3"
- "^0.2.3" is equivalent to ">=0.2.3, <0.3.0"
- "^0.2" is equivalent to ">=0.2.0, <0.3.0"
- "^0.0.3" is equvalent to ">=0.0.3, <0.0.4"
- "^0.0" is equivalent to ">=0.0.0, <0.1.0"
- "^0" is equivalent to ">=0.0.0, <1.0.0"
# OR Comparisons
You can use the "\|\|" character to represent an OR operation in the version
range. Some examples:
- ">=1.2.3, <2.0.0 \|\| >3.0.0"
- "^0 \|\| ^3 \|\| ^5"
For more information on semver, please see https://semver.org/ | | MaxLength: 64
| -| `channels` _string array_ | channels is an optional reference to a set of channels belonging to
the package specified in the packageName field.
A "channel" is a package-author-defined stream of updates for an extension.
Each channel in the list must follow the DNS subdomain standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters. No more than 256 channels can be specified.
When specified, it is used to constrain the set of installable bundles and
the automated upgrade path. This constraint is an AND operation with the
version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable
- Automatic upgrades will be constrained to upgrade edges defined by the selected channel
When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
- alpha
- stable
- stable-v1
- v1-stable
- dev-preview
- preview
- community
Some examples of invalid values are:
- -some-channel
- some-channel-
- thisisareallylongchannelnamethatisgreaterthanthemaximumlength
- original_40
- --default-channel
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxItems: 256
items:MaxLength: 253
items:XValidation: \{self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") channels entries must be valid DNS1123 subdomains \}
| -| `selector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)_ | selector is an optional field that can be used
to filter the set of ClusterCatalogs used in the bundle
selection process.
When unspecified, all ClusterCatalogs will be used in
the bundle selection process. | | | -| `upgradeConstraintPolicy` _[UpgradeConstraintPolicy](#upgradeconstraintpolicy)_ | upgradeConstraintPolicy is an optional field that controls whether
the upgrade path(s) defined in the catalog are enforced for the package
referenced in the packageName field.
Allowed values are: "CatalogProvided" or "SelfCertified", or omitted.
When this field is set to "CatalogProvided", automatic upgrades will only occur
when upgrade constraints specified by the package author are met.
When this field is set to "SelfCertified", the upgrade constraints specified by
the package author are ignored. This allows for upgrades and downgrades to
any version of the package. This is considered a dangerous operation as it
can lead to unknown and potentially disastrous outcomes, such as data
loss. It is assumed that users have independently verified changes when
using this option.
When this field is omitted, the default value is "CatalogProvided". | CatalogProvided | Enum: [CatalogProvided SelfCertified]
| +| `packageName` _string_ | packageName specifies the name of the package to be installed and is used to filter
the content from catalogs.
It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
- 123-package
- 1-package-2
- somepackage
Some examples of invalid values are:
- -some-package
- some-package-
- thisisareallylongpackagenamethatisgreaterthanthemaximumlength
- some.package
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
| +| `version` _string_ | version is an optional semver constraint (a specific version or range of versions).
When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
known as comparison strings.
You can add more comparison strings using the OR operator (\|\|).
# Range Comparisons
To specify a version range, you can use a comparison string like ">=3.0,
<3.6". When specifying a range, automatic updates will occur within that
range. The example comparison string means "install any version greater than
or equal to 3.0.0 but less than 3.6.0". It also states intent that if any
upgrades are available within the version range after initial installation,
those upgrades should be automatically performed.
# Pinned Versions
To specify an exact version to install you can use a version range that
"pins" to a specific version. When pinning to a specific version, no
automatic updates will occur. An example of a pinned version range is
"0.6.0", which means "only install version 0.6.0 and never
upgrade from this version".
# Basic Comparison Operators
The basic comparison operators and their meanings are:
- "=", equal (not aliased to an operator)
- "!=", not equal
- "<", less than
- ">", greater than
- ">=", greater than OR equal to
- "<=", less than OR equal to
# Wildcard Comparisons
You can use the "x", "X", and "*" characters as wildcard characters in all
comparison operations. Some examples of using the wildcard characters:
- "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0"
- ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0"
- "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3"
- "x", "X", and "*" is equivalent to ">= 0.0.0"
# Patch Release Comparisons
When you want to specify a minor version up to the next major version you
can use the "~" character to perform patch comparisons. Some examples:
- "~1.2.3" is equivalent to ">=1.2.3, <1.3.0"
- "~1" and "~1.x" is equivalent to ">=1, <2"
- "~2.3" is equivalent to ">=2.3, <2.4"
- "~1.2.x" is equivalent to ">=1.2.0, <1.3.0"
# Major Release Comparisons
You can use the "^" character to make major release comparisons after a
stable 1.0.0 version is published. If there is no stable version published, minor versions define the stability level. Some examples:
- "^1.2.3" is equivalent to ">=1.2.3, <2.0.0"
- "^1.2.x" is equivalent to ">=1.2.0, <2.0.0"
- "^2.3" is equivalent to ">=2.3, <3"
- "^2.x" is equivalent to ">=2.0.0, <3"
- "^0.2.3" is equivalent to ">=0.2.3, <0.3.0"
- "^0.2" is equivalent to ">=0.2.0, <0.3.0"
- "^0.0.3" is equvalent to ">=0.0.3, <0.0.4"
- "^0.0" is equivalent to ">=0.0.0, <0.1.0"
- "^0" is equivalent to ">=0.0.0, <1.0.0"
# OR Comparisons
You can use the "\|\|" character to represent an OR operation in the version
range. Some examples:
- ">=1.2.3, <2.0.0 \|\| >3.0.0"
- "^0 \|\| ^3 \|\| ^5"
For more information on semver, please see https://semver.org/ | | MaxLength: 64
| +| `channels` _string array_ | channels is optional and specifies a set of channels belonging to the package
specified in the packageName field.
A channel is a package-author-defined stream of updates for an extension.
Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters.
You can specify no more than 256 channels.
When specified, it constrains the set of installable bundles and the automated upgrade path.
This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
- Automatic upgrades are constrained to upgrade edges defined by the selected channel
When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
- alpha
- stable
- stable-v1
- v1-stable
- dev-preview
- preview
- community
Some examples of invalid values are:
- -some-channel
- some-channel-
- thisisareallylongchannelnamethatisgreaterthanthemaximumlength
- original_40
- --default-channel
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxItems: 256
items:MaxLength: 253
items:XValidation: \{self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") channels entries must be valid DNS1123 subdomains \}
| +| `selector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)_ | selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
When unspecified, all ClusterCatalogs are used in the bundle selection process. | | | +| `upgradeConstraintPolicy` _[UpgradeConstraintPolicy](#upgradeconstraintpolicy)_ | upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
are enforced for the package referenced in the packageName field.
Allowed values are "CatalogProvided", "SelfCertified", or omitted.
When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
author are met.
When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
This allows upgrades and downgrades to any version of the package.
This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
such as data loss.
Use this option only if you have independently verified the changes.
When omitted, the default value is "CatalogProvided". | CatalogProvided | Enum: [CatalogProvided SelfCertified]
| #### CatalogSource @@ -117,15 +117,15 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[SourceType](#sourcetype)_ | type is a reference to the type of source the catalog is sourced from.
type is required.
The only allowed value is "Image".
When set to "Image", the ClusterCatalog content will be sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type. | | Enum: [Image]
Required: \{\}
| -| `image` _[ImageSource](#imagesource)_ | image is used to configure how catalog contents are sourced from an OCI image.
This field is required when type is Image, and forbidden otherwise. | | | +| `type` _[SourceType](#sourcetype)_ | type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type. | | Enum: [Image]
Required: \{\}
| +| `image` _[ImageSource](#imagesource)_ | image configures how catalog contents are sourced from an OCI image.
It is required when type is Image, and forbidden otherwise. | | | #### ClusterCatalog -ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. +ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs @@ -140,8 +140,8 @@ _Appears in:_ | `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | | `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | | `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | -| `spec` _[ClusterCatalogSpec](#clustercatalogspec)_ | spec is the desired state of the ClusterCatalog.
spec is required.
The controller will work to ensure that the desired
catalog is unpacked and served over the catalog content HTTP server. | | Required: \{\}
| -| `status` _[ClusterCatalogStatus](#clustercatalogstatus)_ | status contains information about the state of the ClusterCatalog such as:
- Whether or not the catalog contents are being served via the catalog content HTTP server
- Whether or not the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved | | | +| `spec` _[ClusterCatalogSpec](#clustercatalogspec)_ | spec is a required field that defines the desired state of the ClusterCatalog.
The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. | | Required: \{\}
| +| `status` _[ClusterCatalogStatus](#clustercatalogstatus)_ | status contains the following information about the state of the ClusterCatalog:
- Whether the catalog contents are being served via the catalog content HTTP server
- Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved | | | #### ClusterCatalogList @@ -177,9 +177,9 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `source` _[CatalogSource](#catalogsource)_ | source allows a user to define the source of a catalog.
A "catalog" contains information on content that can be installed on a cluster.
Providing a catalog source makes the contents of the catalog discoverable and usable by
other on-cluster components.
These on-cluster components may do a variety of things with this information, such as
presenting the content in a GUI dashboard or installing content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
source is a required field.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
source:
type: Image
image:
ref: quay.io/operatorhubio/catalog:latest | | Required: \{\}
| -| `priority` _integer_ | priority allows the user to define a priority for a ClusterCatalog.
priority is optional.
A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements.
A higher number means higher priority.
It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input.
When omitted, the default priority is 0 because that is the zero value of integers.
Negative numbers can be used to specify a priority lower than the default.
Positive numbers can be used to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647. | 0 | | -| `availabilityMode` _[AvailabilityMode](#availabilitymode)_ | availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster.
availabilityMode is optional.
Allowed values are "Available" and "Unavailable" and omitted.
When omitted, the default value is "Available".
When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server.
Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog
and its contents as usable.
When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server.
When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing.
Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want
to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. | Available | Enum: [Unavailable Available]
| +| `source` _[CatalogSource](#catalogsource)_ | source is a required field that defines the source of a catalog.
A catalog contains information on content that can be installed on a cluster.
The catalog source makes catalog contents discoverable and usable by other on-cluster components.
These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
source:
type: Image
image:
ref: quay.io/operatorhubio/catalog:latest | | Required: \{\}
| +| `priority` _integer_ | priority is an optional field that defines a priority for this ClusterCatalog.
Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
Higher numbers mean higher priority.
Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
Clients should prompt users for additional input to break the tie.
When omitted, the default priority is 0.
Use negative numbers to specify a priority lower than the default.
Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647. | 0 | Maximum: 2.147483647e+09
Minimum: -2.147483648e+09
| +| `availabilityMode` _[AvailabilityMode](#availabilitymode)_ | availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
Clients should consider this ClusterCatalog and its contents as usable.
When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
Treat this the same as if the ClusterCatalog does not exist.
Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. | Available | Enum: [Unavailable Available]
| #### ClusterCatalogStatus @@ -195,10 +195,10 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions is a representation of the current state for this ClusterCatalog.
The current condition types are Serving and Progressing.
The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server.
When it has a status of True and a reason of Available, the contents of the catalog are being served.
When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available.
When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable.
The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state.
When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts.
When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery.
In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched
catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog
contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes
to the contents we identify that there are updates to the contents. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
- When status is True and reason is Available, the catalog contents are being served.
- When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
- When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
- When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
- When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously:
- The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
- The Progressing condition is True with reason Retrying because the system is working to serve the new version. | | | | `resolvedSource` _[ResolvedCatalogSource](#resolvedcatalogsource)_ | resolvedSource contains information about the resolved source based on the source type. | | | | `urls` _[ClusterCatalogURLs](#clustercatalogurls)_ | urls contains the URLs that can be used to access the catalog. | | | -| `lastUnpacked` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastUnpacked represents the last time the contents of the
catalog were extracted from their source format. As an example,
when using an Image source, the OCI image will be pulled and the
image layers written to a file-system backed cache. We refer to the
act of this extraction from the source format as "unpacking". | | | +| `lastUnpacked` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastUnpacked represents the last time the catalog contents were extracted from their source format.
For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
This extraction from the source format is called "unpacking". | | | #### ClusterCatalogURLs @@ -214,7 +214,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `base` _string_ | base is a cluster-internal URL that provides endpoints for
accessing the content of the catalog.
It is expected that clients append the path for the endpoint they wish
to access.
Currently, only a single endpoint is served and is accessible at the path
/api/v1.
The endpoints served for the v1 API are:
- /all - this endpoint returns the entirety of the catalog contents in the FBC format
As the needs of users and clients of the evolve, new endpoints may be added. | | MaxLength: 525
Required: \{\}
| +| `base` _string_ | base is a cluster-internal URL that provides endpoints for accessing the catalog content.
Clients should append the path for the endpoint they want to access.
Currently, only a single endpoint is served and is accessible at the path /api/v1.
The endpoints served for the v1 API are:
- /all - this endpoint returns the entire catalog contents in the FBC format
New endpoints may be added as needs evolve. | | MaxLength: 525
Required: \{\}
| #### ClusterExtension @@ -253,8 +253,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `configType` _[ClusterExtensionConfigType](#clusterextensionconfigtype)_ | configType is a required reference to the type of configuration source.
Allowed values are "Inline"
When this field is set to "Inline", the cluster extension configuration is defined inline within the
ClusterExtension resource. | | Enum: [Inline]
Required: \{\}
| -| `inline` _[JSON](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#json-v1-apiextensions-k8s-io)_ | inline contains JSON or YAML values specified directly in the
ClusterExtension.
inline is used to specify arbitrary configuration values for the ClusterExtension.
It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
The configuration values are validated at runtime against a JSON schema provided by the bundle. | | MinProperties: 1
Type: object
| +| `configType` _[ClusterExtensionConfigType](#clusterextensionconfigtype)_ | configType is required and specifies the type of configuration source.
The only allowed value is "Inline".
When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. | | Enum: [Inline]
Required: \{\}
| +| `inline` _[JSON](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#json-v1-apiextensions-k8s-io)_ | inline contains JSON or YAML values specified directly in the ClusterExtension.
It is used to specify arbitrary configuration values for the ClusterExtension.
It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
The configuration values are validated at runtime against a JSON schema provided by the bundle. | | MinProperties: 1
Type: object
| #### ClusterExtensionConfigType @@ -287,7 +287,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `preflight` _[PreflightConfig](#preflightconfig)_ | preflight is an optional field that can be used to configure the checks that are
run before installation or upgrade of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
When not specified, the default configuration will be used. | | | +| `preflight` _[PreflightConfig](#preflightconfig)_ | preflight is optional and configures the checks that run before installation or upgrade
of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
When not specified, the default configuration is used. | | | #### ClusterExtensionInstallStatus @@ -303,7 +303,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `bundle` _[BundleMetadata](#bundlemetadata)_ | bundle is a required field which represents the identifying attributes of a bundle.
A "bundle" is a versioned set of content that represents the resources that
need to be applied to a cluster to install a package. | | Required: \{\}
| +| `bundle` _[BundleMetadata](#bundlemetadata)_ | bundle is required and represents the identifying attributes of a bundle.
A "bundle" is a versioned set of content that represents the resources that need to be applied
to a cluster to install a package. | | Required: \{\}
| #### ClusterExtensionList @@ -339,11 +339,11 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `namespace` _string_ | namespace is a reference to a Kubernetes namespace.
This is the namespace in which the provided ServiceAccount must exist.
It also designates the default namespace where namespace-scoped resources
for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
namespace is required, immutable, and follows the DNS label standard
as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-),
start and end with an alphanumeric character, and be no longer than 63 characters
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 63
Required: \{\}
| -| `serviceAccount` _[ServiceAccountReference](#serviceaccountreference)_ | serviceAccount is a reference to a ServiceAccount used to perform all interactions
with the cluster that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
serviceAccount is required. | | Required: \{\}
| -| `source` _[SourceConfig](#sourceconfig)_ | source is a required field which selects the installation source of content
for this ClusterExtension. Selection is performed by setting the sourceType.
Catalog is currently the only implemented sourceType, and setting the
sourcetype to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
source:
sourceType: Catalog
catalog:
packageName: example-package | | Required: \{\}
| -| `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is an optional field used to configure the installation options
for the ClusterExtension such as the pre-flight check configuration. | | | -| `config` _[ClusterExtensionConfig](#clusterextensionconfig)_ | config is an optional field used to specify bundle specific configuration
used to configure the bundle. Configuration is bundle specific and a bundle may provide
a configuration schema. When not specified, the default configuration of the resolved bundle will be used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema the bundle is deemed to not be configurable. More information on how
to configure bundles can be found in the OLM documentation associated with your current OLM version.
| | | +| `namespace` _string_ | namespace specifies a Kubernetes namespace.
This is the namespace where the provided ServiceAccount must exist.
It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 63
Required: \{\}
| +| `serviceAccount` _[ServiceAccountReference](#serviceaccountreference)_ | serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
The serviceAccount field is required. | | Required: \{\}
| +| `source` _[SourceConfig](#sourceconfig)_ | source is required and selects the installation source of content for this ClusterExtension.
Set the sourceType field to perform the selection.
Catalog is currently the only implemented sourceType.
Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
source:
sourceType: Catalog
catalog:
packageName: example-package | | Required: \{\}
| +| `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is optional and configures installation options for the ClusterExtension,
such as the pre-flight check configuration. | | | +| `config` _[ClusterExtensionConfig](#clusterextensionconfig)_ | config is optional and specifies bundle-specific configuration.
Configuration is bundle-specific and a bundle may provide a configuration schema.
When not specified, the default configuration of the resolved bundle is used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema, the bundle is deemed not configurable. More information on how
to configure bundles can be found in the OLM documentation associated with your current OLM version. | | | #### ClusterExtensionStatus @@ -359,7 +359,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | The set of condition types which apply to all spec.source variations are Installed and Progressing.
The Installed condition represents whether or not the bundle has been installed for this ClusterExtension.
When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.

When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.

When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition.
These are indications from a package owner to guide users away from a particular package, channel, or bundle.
BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
PackageDeprecated is set if the requested package is marked deprecated in the catalog.
Deprecated is a rollup condition that is present when any of the deprecated conditions are present. | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions represents the current state of the ClusterExtension.
The set of condition types which apply to all spec.source variations are Installed and Progressing.
The Installed condition represents whether the bundle has been installed for this ClusterExtension:
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.

When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out.

When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition.
These are indications from a package owner to guide users away from a particular package, channel, or bundle:
- BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog.
- ChannelDeprecated is set if the requested channel is marked deprecated in the catalog.
- PackageDeprecated is set if the requested package is marked deprecated in the catalog.
- Deprecated is a rollup condition that is present when any of the deprecated conditions are present. | | | | `install` _[ClusterExtensionInstallStatus](#clusterextensioninstallstatus)_ | install is a representation of the current installation status for this ClusterExtension. | | | | `activeRevisions` _[RevisionStatus](#revisionstatus) array_ | activeRevisions holds a list of currently active (non-archived) ClusterExtensionRevisions,
including both installed and rolling out revisions.
| | | @@ -382,8 +382,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `ref` _string_ | ref allows users to define the reference to a container image containing Catalog contents.
ref is required.
ref can not be more than 1000 characters.
A reference can be broken down into 3 parts - the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
Hyphenation is allowed, but the domain must start and end with alphanumeric characters.
Specifying a port to use is also allowed by adding the ":" character followed by numeric values.
The port must be the last value in the domain.
Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080".
The name is typically the repository in the registry where an image is located.
It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters.
Multiple names can be concatenated with the "/" character.
The domain and name are combined using the "/" character.
Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod".
An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog".
The identifier is typically the tag or digest for an image reference and is present at the end of the reference.
It starts with a separator character used to distinguish the end of the name and beginning of the identifier.
For a digest-based reference, the "@" character is the separator.
For a tag-based reference, the ":" character is the separator.
An identifier is required in the reference.
Digest-based references must contain an algorithm reference immediately after the "@" separator.
The algorithm reference must be followed by the ":" character and an encoded string.
The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters.
Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58".
The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters.
Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters or ".", and "-" characters.
The tag must not be longer than 127 characters.
An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05"
An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest" | | MaxLength: 1000
Required: \{\}
| -| `pollIntervalMinutes` _integer_ | pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content.
pollIntervalMinutes is optional.
pollIntervalMinutes can not be specified when ref is a digest-based reference.
When omitted, the image will not be polled for new content. | | Minimum: 1
| +| `ref` _string_ | ref is a required field that defines the reference to a container image containing catalog contents.
It cannot be more than 1000 characters.
A reference has 3 parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
Hyphenation is allowed, but the domain must start and end with alphanumeric characters.
Specifying a port to use is also allowed by adding the ":" character followed by numeric values.
The port must be the last value in the domain.
Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080".
The name is typically the repository in the registry where an image is located.
It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters.
Multiple names can be concatenated with the "/" character.
The domain and name are combined using the "/" character.
Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod".
An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog".
The identifier is typically the tag or digest for an image reference and is present at the end of the reference.
It starts with a separator character used to distinguish the end of the name and beginning of the identifier.
For a digest-based reference, the "@" character is the separator.
For a tag-based reference, the ":" character is the separator.
An identifier is required in the reference.
Digest-based references must contain an algorithm reference immediately after the "@" separator.
The algorithm reference must be followed by the ":" character and an encoded string.
The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters.
Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58".
The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters.
Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters or ".", and "-" characters.
The tag must not be longer than 127 characters.
An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05"
An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest" | | MaxLength: 1000
Required: \{\}
| +| `pollIntervalMinutes` _integer_ | pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
You cannot specify pollIntervalMinutes when ref is a digest-based reference.
When omitted, the image is not polled for new content. | | Minimum: 1
| #### PreflightConfig @@ -399,7 +399,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `crdUpgradeSafety` _[CRDUpgradeSafetyPreflightConfig](#crdupgradesafetypreflightconfig)_ | crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight
checks that run prior to upgrades of installed content.
The CRD Upgrade Safety pre-flight check safeguards from unintended
consequences of upgrading a CRD, such as data loss. | | | +| `crdUpgradeSafety` _[CRDUpgradeSafetyPreflightConfig](#crdupgradesafetypreflightconfig)_ | crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run
before upgrades of installed content.
The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD,
such as data loss. | | | #### ResolvedCatalogSource @@ -416,8 +416,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `type` _[SourceType](#sourcetype)_ | type is a reference to the type of source the catalog is sourced from.
type is required.
The only allowed value is "Image".
When set to "Image", information about the resolved image source will be set in the 'image' field. | | Enum: [Image]
Required: \{\}
| -| `image` _[ResolvedImageSource](#resolvedimagesource)_ | image is a field containing resolution information for a catalog sourced from an image.
This field must be set when type is Image, and forbidden otherwise. | | | +| `type` _[SourceType](#sourcetype)_ | type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
When set to "Image", information about the resolved image source is set in the image field. | | Enum: [Image]
Required: \{\}
| +| `image` _[ResolvedImageSource](#resolvedimagesource)_ | image contains resolution information for a catalog sourced from an image.
It must be set when type is Image, and forbidden otherwise. | | | #### ResolvedImageSource @@ -433,7 +433,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `ref` _string_ | ref contains the resolved image digest-based reference.
The digest format is used so users can use other tooling to fetch the exact
OCI manifests that were used to extract the catalog contents. | | MaxLength: 1000
Required: \{\}
| +| `ref` _string_ | ref contains the resolved image digest-based reference.
The digest format allows you to use other tooling to fetch the exact OCI manifests
that were used to extract the catalog contents. | | MaxLength: 1000
Required: \{\}
| #### RevisionStatus @@ -466,7 +466,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `name` _string_ | name is a required, immutable reference to the name of the ServiceAccount
to be used for installation and management of the content for the package
specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
name follows the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters,
hyphens (-) or periods (.), start and end with an alphanumeric character,
and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
- 123-serviceaccount
- 1-serviceaccount-2
- someserviceaccount
- some.serviceaccount
Some examples of invalid values are:
- -some-serviceaccount
- some-serviceaccount-
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
| +| `name` _string_ | name is a required, immutable reference to the name of the ServiceAccount used for installation
and management of the content for the package specified in the packageName field.
This ServiceAccount must exist in the installNamespace.
The name field follows the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-serviceaccount
- 123-serviceaccount
- 1-serviceaccount-2
- someserviceaccount
- some.serviceaccount
Some examples of invalid values are:
- -some-serviceaccount
- some-serviceaccount-
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
| #### SourceConfig @@ -482,8 +482,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `sourceType` _string_ | sourceType is a required reference to the type of install source.
Allowed values are "Catalog"
When this field is set to "Catalog", information for determining the
appropriate bundle of content to install will be fetched from
ClusterCatalog resources existing on the cluster.
When using the Catalog sourceType, the catalog field must also be set. | | Enum: [Catalog]
Required: \{\}
| -| `catalog` _[CatalogFilter](#catalogfilter)_ | catalog is used to configure how information is sourced from a catalog.
This field is required when sourceType is "Catalog", and forbidden otherwise. | | | +| `sourceType` _string_ | sourceType is required and specifies the type of install source.
The only allowed value is "Catalog".
When set to "Catalog", information for determining the appropriate bundle of content to install
is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set. | | Enum: [Catalog]
Required: \{\}
| +| `catalog` _[CatalogFilter](#catalogfilter)_ | catalog configures how information is sourced from a catalog.
It is required when sourceType is "Catalog", and forbidden otherwise. | | | #### SourceType diff --git a/docs/draft/howto/single-ownnamespace-install.md b/docs/draft/howto/single-ownnamespace-install.md index 8e6f00fe4..415294687 100644 --- a/docs/draft/howto/single-ownnamespace-install.md +++ b/docs/draft/howto/single-ownnamespace-install.md @@ -1,8 +1,7 @@ ## Description !!! note -This feature is still in *alpha* the `SingleOwnNamespaceInstallSupport` feature-gate must be enabled to make use of it. -See the instructions below on how to enable it. +The `SingleOwnNamespaceInstallSupport` feature-gate is enabled by default. Use this guide to configure bundles that need Single or Own namespace install modes. --- @@ -31,28 +30,6 @@ include *installModes*. [![OwnNamespace Install Demo](https://asciinema.org/a/Rxx6WUwAU016bXFDW74XLcM5i.svg)](https://asciinema.org/a/Rxx6WUwAU016bXFDW74XLcM5i) -## Enabling the Feature-Gate - -!!! tip - -This guide assumes OLMv1 is already installed. If that is not the case, -you can follow the [getting started](../../getting-started/olmv1_getting_started.md) guide to install OLMv1. - ---- - -Patch the `operator-controller` `Deployment` adding `--feature-gates=SingleOwnNamespaceInstallSupport=true` to the -controller container arguments: - -```terminal title="Enable SingleOwnNamespaceInstallSupport feature-gate" -kubectl patch deployment -n olmv1-system operator-controller-controller-manager --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--feature-gates=SingleOwnNamespaceInstallSupport=true"}]' -``` - -Wait for `Deployment` rollout: - -```terminal title="Wait for Deployment rollout" -kubectl rollout status -n olmv1-system deployment/operator-controller-controller-manager -``` - ## Configuring the `ClusterExtension` A `ClusterExtension` can be configured to install bundle in `Single-` or `OwnNamespace` mode through the diff --git a/docs/draft/tutorials/explore-available-content-metas-endpoint.md b/docs/draft/tutorials/explore-available-content-metas-endpoint.md index f17271d3e..5d04b02df 100644 --- a/docs/draft/tutorials/explore-available-content-metas-endpoint.md +++ b/docs/draft/tutorials/explore-available-content-metas-endpoint.md @@ -91,9 +91,6 @@ Then you can query the catalog by using `curl` commands and the `jq` CLI tool to ... ``` - !!! important - OLM 1.0 supports installing extensions that define webhooks. Targeting a single or specified set of namespaces requires enabling the `SingleOwnNamespaceInstallSupport` feature-gate. - 3. Return list of packages which support `AllNamespaces` install mode, do not use webhooks, and where the channel head version uses `olm.csv.metadata` format: ``` terminal diff --git a/docs/project/olmv1_limitations.md b/docs/project/olmv1_limitations.md index 01ce9436d..8f0e921e3 100644 --- a/docs/project/olmv1_limitations.md +++ b/docs/project/olmv1_limitations.md @@ -8,7 +8,8 @@ hide: Currently, OLM v1 only supports installing operators packaged in [OLM v0 bundles](https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/) , also known as `registry+v1` bundles. Additionally, the bundled operator, or cluster extension: -* **must** support installation via the `AllNamespaces` install mode +* **must** support installation via the `AllNamespaces` install mode. + * Note that `AllNamespaces` is the recommended install mode. 
OLM v1 supports `SingleNamespace` and `OwnNamespace` modes for `registry+v1` bundles for backwards compatibility with OLM v0, but these are not recommended install modes as there is a [hard limitation](https://operator-framework.github.io/operator-controller/project/olmv1_design_decisions/#do-not-fight-kubernetes) of only one instance of a given CRD in the cluster. * **must not** declare dependencies using any of the following file-based catalog properties: * `olm.gvk.required` * `olm.package.required` diff --git a/docs/tutorials/explore-available-content.md b/docs/tutorials/explore-available-content.md index 36e3cf883..98bb7733c 100644 --- a/docs/tutorials/explore-available-content.md +++ b/docs/tutorials/explore-available-content.md @@ -91,9 +91,6 @@ Then you can query the catalog by using `curl` commands and the `jq` CLI tool to ... ``` - !!! important - OLM 1.0 supports installing extensions that define webhooks. Targeting a single or specified set of namespaces requires enabling the `SingleOwnNamespaceInstallSupport` feature-gate. - 3. Return list of packages that support `AllNamespaces` install mode and do not use webhooks: ``` terminal diff --git a/go.mod b/go.mod index 89aa72d7b..b727792c4 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,15 @@ module github.com/operator-framework/operator-controller -go 1.24.6 +go 1.25.3 require ( - github.com/BurntSushi/toml v1.5.0 + github.com/BurntSushi/toml v1.6.0 github.com/Masterminds/semver/v3 v3.4.0 github.com/blang/semver/v4 v4.0.0 - github.com/cert-manager/cert-manager v1.18.2 - github.com/containerd/containerd v1.7.29 + github.com/cert-manager/cert-manager v1.19.2 + github.com/containerd/containerd v1.7.30 + github.com/cucumber/godog v0.15.1 + github.com/evanphx/json-patch v5.9.11+incompatible github.com/fsnotify/fsnotify v1.9.0 github.com/go-logr/logr v1.4.3 github.com/golang-jwt/jwt/v5 v5.3.0 @@ -15,48 +17,48 @@ require ( github.com/google/go-containerregistry v0.20.7 github.com/google/renameio/v2 v2.0.1 github.com/gorilla/handlers v1.5.2 - github.com/klauspost/compress v1.18.1 + github.com/klauspost/compress v1.18.2 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.1 - github.com/operator-framework/api v0.36.0 + github.com/operator-framework/api v0.37.0 github.com/operator-framework/helm-operator-plugins v0.8.0 github.com/operator-framework/operator-registry v1.61.0 github.com/prometheus/client_golang v1.23.2 github.com/prometheus/common v0.67.4 github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 - github.com/spf13/cobra v1.10.1 + github.com/spf13/cobra v1.10.2 github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 go.podman.io/image/v5 v5.38.0 - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b - golang.org/x/mod v0.30.0 - golang.org/x/sync v0.18.0 - golang.org/x/tools v0.39.0 - helm.sh/helm/v3 v3.19.3 - k8s.io/api v0.34.1 - k8s.io/apiextensions-apiserver v0.34.1 - k8s.io/apimachinery v0.34.1 - k8s.io/apiserver v0.34.1 - k8s.io/cli-runtime v0.34.0 - k8s.io/client-go v0.34.1 - k8s.io/component-base v0.34.1 + golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 + golang.org/x/mod v0.31.0 + golang.org/x/sync v0.19.0 + golang.org/x/tools v0.40.0 + helm.sh/helm/v3 v3.19.4 + k8s.io/api v0.35.0 + k8s.io/apiextensions-apiserver v0.35.0 + k8s.io/apimachinery v0.35.0 + k8s.io/apiserver v0.35.0 + k8s.io/cli-runtime v0.34.2 + k8s.io/client-go v0.35.0 + k8s.io/component-base v0.35.0 k8s.io/klog/v2 v2.130.1 k8s.io/kubernetes v1.34.0 - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 - 
pkg.package-operator.run/boxcutter v0.7.1 + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 + pkg.package-operator.run/boxcutter v0.8.0 sigs.k8s.io/controller-runtime v0.22.4 - sigs.k8s.io/controller-tools v0.19.0 + sigs.k8s.io/controller-tools v0.20.0 sigs.k8s.io/crdify v0.5.0 sigs.k8s.io/yaml v1.6.0 ) require ( k8s.io/component-helpers v0.34.0 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect ) require ( - cel.dev/expr v0.24.0 // indirect + cel.dev/expr v0.25.1 // indirect dario.cat/mergo v1.0.2 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect @@ -71,7 +73,7 @@ require ( github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v5 v5.0.2 // indirect + github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/containerd/cgroups/v3 v3.0.5 // indirect @@ -86,8 +88,10 @@ require ( github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect + github.com/cucumber/gherkin/go/v26 v26.2.0 // indirect + github.com/cucumber/messages/go/v21 v21.0.1 // indirect github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect - github.com/cyphar/filepath-securejoin v0.6.0 // indirect + github.com/cyphar/filepath-securejoin v0.6.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/cli v29.0.3+incompatible // indirect @@ -97,7 +101,6 @@ require ( github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect - github.com/evanphx/json-patch v5.9.11+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/fatih/color v1.18.0 // indirect @@ -110,45 +113,48 @@ require ( github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.22.0 // indirect - github.com/go-openapi/jsonreference v0.21.1 // indirect - github.com/go-openapi/swag v0.24.1 // indirect - github.com/go-openapi/swag/cmdutils v0.24.0 // indirect - github.com/go-openapi/swag/conv v0.24.0 // indirect - github.com/go-openapi/swag/fileutils v0.24.0 // indirect - github.com/go-openapi/swag/jsonname v0.24.0 // indirect - github.com/go-openapi/swag/jsonutils v0.24.0 // indirect - github.com/go-openapi/swag/loading v0.24.0 // indirect - github.com/go-openapi/swag/mangling v0.24.0 // indirect - github.com/go-openapi/swag/netutils v0.24.0 // indirect - github.com/go-openapi/swag/stringutils v0.24.0 // indirect - github.com/go-openapi/swag/typeutils v0.24.0 // indirect - github.com/go-openapi/swag/yamlutils v0.24.0 // indirect + github.com/go-openapi/jsonpointer v0.22.3 // indirect + github.com/go-openapi/jsonreference v0.21.3 // indirect + github.com/go-openapi/swag v0.25.4 // indirect + github.com/go-openapi/swag/cmdutils v0.25.4 // 
indirect + github.com/go-openapi/swag/conv v0.25.4 // indirect + github.com/go-openapi/swag/fileutils v0.25.4 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-openapi/swag/jsonutils v0.25.4 // indirect + github.com/go-openapi/swag/loading v0.25.4 // indirect + github.com/go-openapi/swag/mangling v0.25.4 // indirect + github.com/go-openapi/swag/netutils v0.25.4 // indirect + github.com/go-openapi/swag/stringutils v0.25.4 // indirect + github.com/go-openapi/swag/typeutils v0.25.4 // indirect + github.com/go-openapi/swag/yamlutils v0.25.4 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect + github.com/gofrs/uuid v4.3.1+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.26.1 // indirect - github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/gnostic-models v0.7.1 // indirect github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect github.com/h2non/filetype v1.1.3 // indirect github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-memdb v1.3.4 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/huandu/xstrings v1.5.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jmoiron/sqlx v1.4.0 // indirect github.com/joelanford/ignore v0.1.1 // indirect - github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect @@ -156,7 +162,6 @@ require ( github.com/letsencrypt/boulder v0.0.0-20250624003606-5ddd5acf990d // indirect github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect @@ -179,7 +184,7 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/onsi/gomega v1.38.2 // indirect + github.com/onsi/gomega v1.38.3 // indirect github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/operator-framework/operator-lib v0.17.0 // indirect github.com/otiai10/copy v1.14.1 // indirect @@ -189,7 +194,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/proglottis/gpgme v0.1.5 // indirect 
github.com/prometheus/client_model v0.6.2 // indirect - github.com/prometheus/procfs v0.17.0 // indirect + github.com/prometheus/procfs v0.19.2 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rubenv/sql-migrate v1.8.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect @@ -203,7 +208,7 @@ require ( github.com/spf13/cast v1.7.1 // indirect github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect github.com/stoewer/go-strcase v1.3.1 // indirect - github.com/stretchr/objx v0.5.2 // indirect + github.com/stretchr/objx v0.5.3 // indirect github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect github.com/ulikunitz/xz v0.5.15 // indirect github.com/vbatts/tar-split v0.12.2 // indirect @@ -215,8 +220,8 @@ require ( go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect go.opentelemetry.io/otel v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect go.opentelemetry.io/otel/metric v1.38.0 // indirect go.opentelemetry.io/otel/sdk v1.37.0 // indirect go.opentelemetry.io/otel/trace v1.38.0 // indirect @@ -225,13 +230,13 @@ require ( go.podman.io/storage v1.61.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.45.0 // indirect - golang.org/x/net v0.47.0 // indirect + golang.org/x/crypto v0.46.0 // indirect + golang.org/x/net v0.48.0 // indirect golang.org/x/oauth2 v0.33.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/term v0.37.0 // indirect - golang.org/x/text v0.31.0 // indirect - golang.org/x/time v0.13.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.38.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/time v0.14.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto v0.0.0-20250603155806-513f23925822 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b // indirect @@ -244,15 +249,15 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/controller-manager v0.33.2 // indirect - k8s.io/kubectl v0.34.0 // indirect + k8s.io/kubectl v0.34.2 // indirect oras.land/oras-go/v2 v2.6.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 // indirect - sigs.k8s.io/gateway-api v1.1.0 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/gateway-api v1.4.0 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/kustomize/api v0.20.1 // indirect sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect ) retract v1.5.0 // contains filename with ':' which causes failure creating module zip file diff --git a/go.sum b/go.sum index c9879d9aa..80bec6996 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= -cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= 
+cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= @@ -10,8 +10,8 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= -github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk= +github.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -44,11 +44,11 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cert-manager/cert-manager v1.18.2 h1:H2P75ycGcTMauV3gvpkDqLdS3RSXonWF2S49QGA1PZE= -github.com/cert-manager/cert-manager v1.18.2/go.mod h1:icDJx4kG9BCNpGjBvrmsFd99d+lXUvWdkkcrSSQdIiw= +github.com/cert-manager/cert-manager v1.19.2 h1:jSprN1h5pgNDSl7HClAmIzXuTxic/5FXJ32kbQHqjlM= +github.com/cert-manager/cert-manager v1.19.2/go.mod h1:e9NzLtOKxTw7y99qLyWGmPo6mrC1Nh0EKKcMkRfK+GE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= @@ -57,8 +57,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= -github.com/containerd/containerd v1.7.29 h1:90fWABQsaN9mJhGkoVnuzEY+o1XDPbg9BTC9QTAHnuE= -github.com/containerd/containerd v1.7.29/go.mod h1:azUkWcOvHrWvaiUjSQH0fjzuHIwSPg1WL5PshGP4Szs= +github.com/containerd/containerd v1.7.30 h1:/2vezDpLDVGGmkUXmlNPLCCNKHJ5BbC5tJB5JNzQhqE= 
+github.com/containerd/containerd v1.7.30/go.mod h1:fek494vwJClULlTpExsmOyKCMUAbuVjlFsJQc4/j44M= github.com/containerd/containerd/api v1.9.0 h1:HZ/licowTRazus+wt9fM6r/9BQO7S0vD5lMcWspGIg0= github.com/containerd/containerd/api v1.9.0/go.mod h1:GhghKFmTR3hNtyznBoQ0EMWr9ju5AqHjcZPsSpTKutI= github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= @@ -85,13 +85,21 @@ github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cucumber/gherkin/go/v26 v26.2.0 h1:EgIjePLWiPeslwIWmNQ3XHcypPsWAHoMCz/YEBKP4GI= +github.com/cucumber/gherkin/go/v26 v26.2.0/go.mod h1:t2GAPnB8maCT4lkHL99BDCVNzCh1d7dBhCLt150Nr/0= +github.com/cucumber/godog v0.15.1 h1:rb/6oHDdvVZKS66hrhpjFQFHjthFSrQBCOI1LwshNTI= +github.com/cucumber/godog v0.15.1/go.mod h1:qju+SQDewOljHuq9NSM66s0xEhogx0q30flfxL4WUk8= +github.com/cucumber/messages/go/v21 v21.0.1 h1:wzA0LxwjlWQYZd32VTlAVDTkW6inOFmSM+RuOwHZiMI= +github.com/cucumber/messages/go/v21 v21.0.1/go.mod h1:zheH/2HS9JLVFukdrsPWoPdmUtmYQAQPLk7w5vWsk5s= +github.com/cucumber/messages/go/v22 v22.0.0/go.mod h1:aZipXTKc0JnjCsXrJnuZpWhtay93k7Rn3Dee7iyPJjs= github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 h1:uX1JmpONuD549D73r6cgnxyUu18Zb7yHAy5AYU0Pm4Q= github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= -github.com/cyphar/filepath-securejoin v0.6.0 h1:BtGB77njd6SVO6VztOHfPxKitJvd/VPT+OFBFMOi1Is= -github.com/cyphar/filepath-securejoin v0.6.0/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= +github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= +github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -163,34 +171,40 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.22.0 h1:TmMhghgNef9YXxTu1tOopo+0BGEytxA+okbry0HjZsM= -github.com/go-openapi/jsonpointer v0.22.0/go.mod h1:xt3jV88UtExdIkkL7NloURjRQjbeUgcxFblMjq2iaiU= -github.com/go-openapi/jsonreference v0.21.1 h1:bSKrcl8819zKiOgxkbVNRUBIr6Wwj9KYrDbMjRs0cDA= -github.com/go-openapi/jsonreference v0.21.1/go.mod h1:PWs8rO4xxTUqKGu+lEvvCxD5k2X7QYkKAepJyCmSTT8= -github.com/go-openapi/swag v0.24.1 h1:DPdYTZKo6AQCRqzwr/kGkxJzHhpKxZ9i/oX0zag+MF8= -github.com/go-openapi/swag 
v0.24.1/go.mod h1:sm8I3lCPlspsBBwUm1t5oZeWZS0s7m/A+Psg0ooRU0A= -github.com/go-openapi/swag/cmdutils v0.24.0 h1:KlRCffHwXFI6E5MV9n8o8zBRElpY4uK4yWyAMWETo9I= -github.com/go-openapi/swag/cmdutils v0.24.0/go.mod h1:uxib2FAeQMByyHomTlsP8h1TtPd54Msu2ZDU/H5Vuf8= -github.com/go-openapi/swag/conv v0.24.0 h1:ejB9+7yogkWly6pnruRX45D1/6J+ZxRu92YFivx54ik= -github.com/go-openapi/swag/conv v0.24.0/go.mod h1:jbn140mZd7EW2g8a8Y5bwm8/Wy1slLySQQ0ND6DPc2c= -github.com/go-openapi/swag/fileutils v0.24.0 h1:U9pCpqp4RUytnD689Ek/N1d2N/a//XCeqoH508H5oak= -github.com/go-openapi/swag/fileutils v0.24.0/go.mod h1:3SCrCSBHyP1/N+3oErQ1gP+OX1GV2QYFSnrTbzwli90= -github.com/go-openapi/swag/jsonname v0.24.0 h1:2wKS9bgRV/xB8c62Qg16w4AUiIrqqiniJFtZGi3dg5k= -github.com/go-openapi/swag/jsonname v0.24.0/go.mod h1:GXqrPzGJe611P7LG4QB9JKPtUZ7flE4DOVechNaDd7Q= -github.com/go-openapi/swag/jsonutils v0.24.0 h1:F1vE1q4pg1xtO3HTyJYRmEuJ4jmIp2iZ30bzW5XgZts= -github.com/go-openapi/swag/jsonutils v0.24.0/go.mod h1:vBowZtF5Z4DDApIoxcIVfR8v0l9oq5PpYRUuteVu6f0= -github.com/go-openapi/swag/loading v0.24.0 h1:ln/fWTwJp2Zkj5DdaX4JPiddFC5CHQpvaBKycOlceYc= -github.com/go-openapi/swag/loading v0.24.0/go.mod h1:gShCN4woKZYIxPxbfbyHgjXAhO61m88tmjy0lp/LkJk= -github.com/go-openapi/swag/mangling v0.24.0 h1:PGOQpViCOUroIeak/Uj/sjGAq9LADS3mOyjznmHy2pk= -github.com/go-openapi/swag/mangling v0.24.0/go.mod h1:Jm5Go9LHkycsz0wfoaBDkdc4CkpuSnIEf62brzyCbhc= -github.com/go-openapi/swag/netutils v0.24.0 h1:Bz02HRjYv8046Ycg/w80q3g9QCWeIqTvlyOjQPDjD8w= -github.com/go-openapi/swag/netutils v0.24.0/go.mod h1:WRgiHcYTnx+IqfMCtu0hy9oOaPR0HnPbmArSRN1SkZM= -github.com/go-openapi/swag/stringutils v0.24.0 h1:i4Z/Jawf9EvXOLUbT97O0HbPUja18VdBxeadyAqS1FM= -github.com/go-openapi/swag/stringutils v0.24.0/go.mod h1:5nUXB4xA0kw2df5PRipZDslPJgJut+NjL7D25zPZ/4w= -github.com/go-openapi/swag/typeutils v0.24.0 h1:d3szEGzGDf4L2y1gYOSSLeK6h46F+zibnEas2Jm/wIw= -github.com/go-openapi/swag/typeutils v0.24.0/go.mod h1:q8C3Kmk/vh2VhpCLaoR2MVWOGP8y7Jc8l82qCTd1DYI= -github.com/go-openapi/swag/yamlutils v0.24.0 h1:bhw4894A7Iw6ne+639hsBNRHg9iZg/ISrOVr+sJGp4c= -github.com/go-openapi/swag/yamlutils v0.24.0/go.mod h1:DpKv5aYuaGm/sULePoeiG8uwMpZSfReo1HR3Ik0yaG8= +github.com/go-openapi/jsonpointer v0.22.3 h1:dKMwfV4fmt6Ah90zloTbUKWMD+0he+12XYAsPotrkn8= +github.com/go-openapi/jsonpointer v0.22.3/go.mod h1:0lBbqeRsQ5lIanv3LHZBrmRGHLHcQoOXQnf88fHlGWo= +github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc= +github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4= +github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= +github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= +github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= +github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= +github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= +github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= +github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= +github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/swag/jsonutils 
v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= +github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= +github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= +github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= +github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= +github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= +github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= +github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= +github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= +github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= +github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= +github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= +github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= +github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= +github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= +github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtSb/iI= github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= @@ -202,6 +216,9 @@ github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI= +github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= @@ -230,8 +247,8 @@ github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.26.1 h1:iPbVVEdkhTX++hpe3lzSk7D3G3QSYqLGoHOcEio+UXQ= github.com/google/cel-go v0.26.1/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= -github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= 
-github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= +github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -265,8 +282,8 @@ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJr github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 h1:JYghRBlGCZyCF2wNUJ8W0cwaQdtpcssJ4CgC406g+WU= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c h1:fEE5/5VNnYUoBOj2I9TP8Jc+a7lge3QWn9DKE7NCwfc= @@ -274,8 +291,19 @@ github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c/go.mod h1:ObS/W+h8 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c= +github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/arc/v2 v2.0.7 h1:QxkVTxwColcduO+LP7eJO56r2hFiG8zEbfAAzRv52KQ= github.com/hashicorp/golang-lru/arc/v2 v2.0.7/go.mod h1:Pe7gBlGdc8clY5LJ0LpJXMt5AmgmWNH1g+oFFVUHOEc= github.com/hashicorp/golang-lru/v2 v2.0.7 
h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -292,20 +320,21 @@ github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/joelanford/ignore v0.1.1 h1:vKky5RDoPT+WbONrbQBgOn95VV/UPh4ejlyAbbzgnQk= github.com/joelanford/ignore v0.1.1/go.mod h1:8eho/D8fwQ3rIXrLwE23AaeaGDNXqLE9QJ3zJ4LIPCw= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= -github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -320,8 +349,6 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= -github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -332,8 +359,8 @@ github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs= github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/miekg/dns v1.1.62 
h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= -github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA= +github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= @@ -380,16 +407,16 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= -github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= -github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= +github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM= +github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/operator-framework/api v0.36.0 h1:6+duRhamCvB540JbvNp/1+Pot7luff7HqdAOm9bAntg= -github.com/operator-framework/api v0.36.0/go.mod h1:QSmHMx8XpGsNWvjU5CUelVZC916VLp/TZhfYvGKpghM= +github.com/operator-framework/api v0.37.0 h1:2XCMWitBnumtJTqzip6LQKUwpM2pXVlt3gkpdlkbaCE= +github.com/operator-framework/api v0.37.0/go.mod h1:NZs4vB+Jiamyv3pdPDjZtuC4U7KX0eq4z2r5hKY5fUA= github.com/operator-framework/helm-operator-plugins v0.8.0 h1:0f6HOQC5likkf0b/OvGvw7nhDb6h8Cj5twdCNjwNzMc= github.com/operator-framework/helm-operator-plugins v0.8.0/go.mod h1:Sc+8bE38xTCgCChBUvtq/PxatEg9fAypr7S5iAw8nlA= github.com/operator-framework/operator-lib v0.17.0 h1:cbz51wZ9+GpWR1ZYP4CSKSSBxDlWxmmnseaHVZZjZt4= @@ -420,8 +447,8 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0 h1:uTiEyEyfLhkw678n6EulHVto8AkcXVr8zUcBJNZ0ark= github.com/redis/go-redis/extra/rediscmd/v9 v9.10.0/go.mod h1:eFYL/99JvdLP4T9/3FZ5t2pClnv7mMskc+WstTcyVr4= github.com/redis/go-redis/extra/redisotel/v9 v9.10.0 
h1:4z7/hCJ9Jft8EBb2tDmK38p2WjyIEJ1ShhhwAhjOCps= @@ -457,8 +484,11 @@ github.com/smallstep/pkcs7 v0.2.1 h1:6Kfzr/QizdIuB6LSv8y1LJdZ3aPSfTNhTLqAx9CTLfA github.com/smallstep/pkcs7 v0.2.1/go.mod h1:RcXHsMfL+BzH8tRhmrF1NkkpebKpq3JEM66cOFxanf0= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= -github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= +github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -469,8 +499,8 @@ github.com/stoewer/go-strcase v1.3.1/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4= +github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -478,6 +508,7 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= @@ -525,10 +556,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0 h1:zwd go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.36.0/go.mod h1:rUKCPscaRWWcqGT6HnEmYrK+YNe5+Sw64xgQTOJ5b30= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0 h1:gAU726w9J8fwr4qRDqu1GYMNNs4gXrU+Pv20/N1UpB4= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.36.0/go.mod h1:RboSDkp7N292rgu+T0MgVt2qgFGu6qa1RpZDOtpL76w= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0= 
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 h1:JgtbA0xkWHnTmYk7YusopJFX6uleBmAuZ8n05NEh8nQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0/go.mod h1:179AK5aar5R3eS9FucPy6rggvU0g52cvKId8pv4+v0c= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ= go.opentelemetry.io/otel/exporters/prometheus v0.58.0 h1:CJAxWKFIqdBennqxJyOgnt5LqkeFRT+Mz3Yjz3hL+h8= @@ -563,8 +594,8 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= @@ -577,11 +608,11 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 h1:R9PFI6EUdfVKgwKjZef7QIwGcBKu86OEFpJ9nUEP2l4= +golang.org/x/exp v0.0.0-20250718183923-645b1fa84792/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -592,8 +623,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= -golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= +golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -610,8 +641,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= @@ -626,8 +657,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -646,8 +677,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod 
h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -657,8 +688,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -668,10 +699,10 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= -golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -684,10 +715,10 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= -golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= -golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= -golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= +golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= +golang.org/x/tools/go/expect v0.1.1-deprecated 
h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM= +golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -745,8 +776,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= -helm.sh/helm/v3 v3.19.3 h1:cTOsZ7XfjD9c05mPKTC1FjRT4h2cKzszfD5aSa72GM8= -helm.sh/helm/v3 v3.19.3/go.mod h1:vup/q0mmu4G+YD2xr9qF5GhhWdoj+wm2gXWojk5jnks= +helm.sh/helm/v3 v3.19.4 h1:E2yFBejmZBczWr5LblhjZbvAOAwVumfBO1AtN3nqI30= +helm.sh/helm/v3 v3.19.4/go.mod h1:PC1rk7PqacpkV4acUFMLStOOis7QM9Jq3DveHBInu4s= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE= @@ -769,37 +800,37 @@ k8s.io/controller-manager v0.34.0 h1:oCHoqS8dcFp7zDSu7HUvTpakq3isSxil3GprGGlJMsE k8s.io/controller-manager v0.34.0/go.mod h1:XFto21U+Mm9BT8r/Jd5E4tHCGtwjKAUFOuDcqaj2VK0= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= k8s.io/kubernetes v1.34.0 h1:NvUrwPAVB4W3mSOpJ/RtNGHWWYyUP/xPaX5rUSpzA0w= k8s.io/kubernetes v1.34.0/go.mod h1:iu+FhII+Oc/1gGWLJcer6wpyih441aNFHl7Pvm8yPto= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= -pkg.package-operator.run/boxcutter v0.7.1 h1:us5wn0px9aAkumrXiQx38+Sc9dTgKJsHFbePoRQeWRo= -pkg.package-operator.run/boxcutter v0.7.1/go.mod h1:xEOKM3e3xtfSKYIOssQnu6DOWceiIu52qziMDcmUmpI= +pkg.package-operator.run/boxcutter v0.8.0 h1:lHKUey0jtpuboRup0LmToseJQNVFF00KtzhwG5vyJgk= +pkg.package-operator.run/boxcutter v0.8.0/go.mod h1:BWraKaCa8V08MbFRMXrmJdb4A8/Ytc90vAJkExVUJqM= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0= 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= -sigs.k8s.io/controller-tools v0.19.0 h1:OU7jrPPiZusryu6YK0jYSjPqg8Vhf8cAzluP9XGI5uk= -sigs.k8s.io/controller-tools v0.19.0/go.mod h1:y5HY/iNDFkmFla2CfQoVb2AQXMsBk4ad84iR1PLANB0= +sigs.k8s.io/controller-tools v0.20.0 h1:VWZF71pwSQ2lZZCt7hFGJsOfDc5dVG28/IysjjMWXL8= +sigs.k8s.io/controller-tools v0.20.0/go.mod h1:b4qPmjGU3iZwqn34alUU5tILhNa9+VXK+J3QV0fT/uU= sigs.k8s.io/crdify v0.5.0 h1:mrMH9CgXQPTZUpTU6Klqfnlys8bggv/7uvLT2lXSP7A= sigs.k8s.io/crdify v0.5.0/go.mod h1:ZIFxaYNgKYmFtZCLPysncXQ8oqwnNlHQbRUfxJHZwzU= -sigs.k8s.io/gateway-api v1.1.0 h1:DsLDXCi6jR+Xz8/xd0Z1PYl2Pn0TyaFMOPPZIj4inDM= -sigs.k8s.io/gateway-api v1.1.0/go.mod h1:ZH4lHrL2sDi0FHZ9jjneb8kKnGzFWyrTya35sWUTrRs= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= -sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/gateway-api v1.4.0 h1:ZwlNM6zOHq0h3WUX2gfByPs2yAEsy/EenYJB78jpQfQ= +sigs.k8s.io/gateway-api v1.4.0/go.mod h1:AR5RSqciWP98OPckEjOjh2XJhAe2Na4LHyXD2FUY7Qk= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I= sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM= sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78= sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E= +sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/hack/api-lint-diff/run.sh b/hack/api-lint-diff/run.sh new file mode 100755 index 000000000..993ec9052 --- /dev/null +++ b/hack/api-lint-diff/run.sh @@ -0,0 +1,528 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Colors for output +RED='\033[0;31m' +YELLOW='\033[1;33m' +GREEN='\033[0;32m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Temporary directory for this run +TEMP_DIR="" +WORKTREE_DIR="" +BASELINE_BRANCH="${BASELINE_BRANCH:-main}" +API_DIR="api" + +# Cleanup function +cleanup() { + # Clean up git worktree first if it exists + if [[ -n "${WORKTREE_DIR}" && -d "${WORKTREE_DIR}" ]]; then + git worktree remove "${WORKTREE_DIR}" --force &> /dev/null || true + fi + + # Clean up temporary directory + if [[ -n "${TEMP_DIR}" && -d "${TEMP_DIR}" ]]; then + rm -rf "${TEMP_DIR}" + fi +} + +trap cleanup EXIT + +# Ensure we're in the repository root +if [[ ! 
-d ".git" ]]; then + echo -e "${RED}Error: Must be run from repository root${NC}" + exit 1 +fi + +if [[ ! -d "${API_DIR}" ]]; then + echo -e "${RED}Error: ${API_DIR}/ directory not found${NC}" + exit 1 +fi + +# Create temporary directory +TEMP_DIR=$(mktemp -d) +echo -e "${BLUE}Using temporary directory: ${TEMP_DIR}${NC}" >&2 + +# Get golangci-lint version from bingo +get_golangci_version() { + # Extract version from .bingo/Variables.mk + # Format: GOLANGCI_LINT := $(GOBIN)/golangci-lint-v2.7.2 + local version + version=$(grep 'GOLANGCI_LINT :=' .bingo/Variables.mk 2>/dev/null | sed -E 's/.*golangci-lint-(v[0-9.]+).*/\1/') + + # Validate that we got a version + if [[ -z "${version}" ]]; then + echo -e "${YELLOW}Warning: Could not extract golangci-lint version from .bingo/Variables.mk${NC}" >&2 + echo -e "${YELLOW}Using default version: latest${NC}" >&2 + version="latest" + fi + + echo "${version}" +} + +# Create temporary golangci-lint config for kube-api-linter +# This config focuses only on kube-api-linter for the api/ directory +create_temp_config() { + cat > "${TEMP_DIR}/.golangci.yaml" <<'EOF' +version: "2" +output: + formats: + tab: + path: stdout + colors: false +linters: + enable: + - kubeapilinter + settings: + custom: + kubeapilinter: + type: module + description: "Kube API Linter plugin" + original-url: "sigs.k8s.io/kube-api-linter" + settings: + linters: {} + lintersConfig: + optionalfields: + pointers: + preference: WhenRequired +run: + timeout: 5m + +issues: + exclude-rules: + # Ignore generated files + - path: zz_generated\..*\.go + linters: + - kubeapilinter + max-issues-per-linter: 0 + max-same-issues: 0 +EOF +} + +# Get kube-api-linter version - pinned for supply chain security +get_kube_api_linter_version() { + # Pin to specific pseudo-version to avoid supply chain risks + # kube-api-linter doesn't have semantic version tags, so we use a pseudo-version + # Update this version intentionally as part of dependency management + # To update: GOPROXY=https://proxy.golang.org go list -m -json sigs.k8s.io/kube-api-linter@latest + local version="v0.0.0-20251219161032-180d2bd496ef" # Latest as of 2025-12-19 + + echo "${version}" +} + +# Create custom golangci-lint configuration +create_custom_gcl_config() { + # Get golangci-lint version from bingo + local golangci_version + golangci_version=$(get_golangci_version) + + # Validate version is not empty + if [[ -z "${golangci_version}" ]]; then + echo -e "${RED}Error: Failed to determine golangci-lint version from .bingo/Variables.mk${NC}" >&2 + exit 1 + fi + + # Get kube-api-linter version (pinned for supply chain security) + local kube_api_linter_version + kube_api_linter_version=$(get_kube_api_linter_version) + + # Create custom-gcl config + cat > "${TEMP_DIR}/.custom-gcl.yml" <&2 + + # Create custom config + create_custom_gcl_config + + # Build custom golangci-lint using the 'custom' command + # This requires the base golangci-lint binary and runs from TEMP_DIR + # where .custom-gcl.yml is located + echo -e "${BLUE}Running golangci-lint custom build...${NC}" >&2 + local build_output + local abs_base_linter + # Convert to absolute path + if [[ "${base_linter}" != /* ]]; then + abs_base_linter="$(pwd)/${base_linter}" + else + abs_base_linter="${base_linter}" + fi + + if ! 
build_output=$(cd "${TEMP_DIR}" && "${abs_base_linter}" custom -v 2>&1); then + echo -e "${YELLOW}Warning: Failed to build custom golangci-lint${NC}" >&2 + echo -e "${YELLOW}Build output:${NC}" >&2 + echo "${build_output}" >&2 + echo -e "${YELLOW}Falling back to base linter (kube-api-linter will not be available)${NC}" >&2 + return 1 + fi + echo -e "${BLUE}Custom linter build completed${NC}" >&2 + + if [[ -f "${custom_binary}" ]]; then + echo -e "${GREEN}Successfully built custom golangci-lint at ${custom_binary}${NC}" >&2 + # Only echo the binary path to stdout for capture + echo "${custom_binary}" + return 0 + else + echo -e "${YELLOW}Warning: Custom binary not found at expected location${NC}" >&2 + return 1 + fi +} + +# Function to check if golangci-lint has kube-api-linter +check_linter_support() { + local linter_path="$1" + if ! "${linter_path}" linters 2>/dev/null | grep -q "kubeapilinter"; then + echo -e "${YELLOW}Warning: golangci-lint at ${linter_path} does not have kubeapilinter plugin${NC}" + echo -e "${YELLOW}Linting results may be incomplete. Consider using custom golangci-lint build.${NC}" + return 1 + fi + return 0 +} + +# Find golangci-lint binary +find_golangci_lint() { + # Check for custom build first + if [[ -f ".bingo/golangci-lint" ]]; then + echo ".bingo/golangci-lint" + return 0 + fi + + # Check for bin/golangci-lint + if [[ -f "bin/golangci-lint" ]]; then + echo "bin/golangci-lint" + return 0 + fi + + # Fall back to system golangci-lint + if command -v golangci-lint &> /dev/null; then + echo "golangci-lint" + return 0 + fi + + echo -e "${RED}Error: golangci-lint not found.${NC}" >&2 + echo -e "${RED}Searched for:${NC}" >&2 + echo -e " - .bingo/golangci-lint" >&2 + echo -e " - bin/golangci-lint" >&2 + echo -e " - golangci-lint on your \$PATH" >&2 + exit 1 +} + +# Run linter and capture output +run_linter() { + local config_file="$1" + local output_file="$2" + local linter_path="$3" + local repo_root="${4:-$(pwd)}" + + # Run golangci-lint on api/ directory only + # Use absolute paths to ensure consistency + (cd "${repo_root}" && "${linter_path}" run \ + --config="${config_file}" \ + --path-prefix="" \ + ./api/...) 
> "${output_file}" 2>&1 || true +} + +# Parse linter output into structured format +# Format: filename:line:column:linter:message +parse_linter_output() { + local output_file="$1" + local parsed_file="$2" + + # Expected format: path/api/v1/file.go:123:45 linter message + # We need to: extract api/ relative path, parse line:col, linter, and message + grep "/${API_DIR}/" "${output_file}" | \ + sed -E "s|^.*/("${API_DIR}"/[^:]+):([0-9]+):([0-9]+)[[:space:]]+([^[:space:]]+)[[:space:]]+(.+)$|\1:\2:\3:\4:\5|" \ + > "${parsed_file}" || true +} + +# Get list of files changed in api/ directory compared to baseline +get_changed_files() { + git diff "${BASELINE_BRANCH}...HEAD" --name-only -- "${API_DIR}/" | \ + grep '\.go$' | \ + grep -v 'zz_generated' || true +} + +# Categorize issues as NEW, PRE-EXISTING, or FIXED +categorize_issues() { + local current_file="$1" + local baseline_file="$2" + local changed_files_file="$3" + local new_issues_file="$4" + local preexisting_issues_file="$5" + local fixed_issues_file="$6" + + # Read changed files into array + local changed_files=() + if [[ -f "${changed_files_file}" ]]; then + while IFS= read -r file; do + changed_files+=("${file}") + done < "${changed_files_file}" + fi + + # Process current issues only if file exists and is not empty + if [[ -f "${current_file}" && -s "${current_file}" ]]; then + while IFS= read -r line; do + [[ -z "${line}" ]] && continue + + local file + file=$(echo "${line}" | cut -d: -f1) + + # If no files were changed, all issues are pre-existing + if [[ ${#changed_files[@]} -eq 0 ]]; then + echo "${line}" >> "${preexisting_issues_file}" + continue + fi + + # Check if file was changed + local file_changed=false + for changed_file in "${changed_files[@]}"; do + if [[ "${file}" == "${changed_file}" ]]; then + file_changed=true + break + fi + done + + # If file wasn't changed, it's pre-existing + if ! 
$file_changed; then + echo "${line}" >> "${preexisting_issues_file}" + continue + fi + + # Check if issue exists in baseline + # Compare without line numbers since line numbers can change when code is added/removed + # Format is: file:line:col:linter:message + # We'll compare: file:linter:message + # Extract file (field 1), linter (field 4), and message (field 5+) from current issue + local file_linter_msg + file_linter_msg=$(echo "${line}" | cut -d: -f1,4,5-) + + # Check if baseline has a matching issue (same file, linter, message but possibly different line number) + # We need to extract the same fields from baseline and compare + local found=false + if [[ -f "${baseline_file}" ]]; then + while IFS= read -r baseline_line; do + [[ -z "${baseline_line}" ]] && continue + local baseline_file_linter_msg + baseline_file_linter_msg=$(echo "${baseline_line}" | cut -d: -f1,4,5-) + if [[ "${file_linter_msg}" == "${baseline_file_linter_msg}" ]]; then + found=true + break + fi + done < "${baseline_file}" + fi + + if $found; then + echo "${line}" >> "${preexisting_issues_file}" + else + echo "${line}" >> "${new_issues_file}" + fi + done < "${current_file}" + fi + + # Find FIXED issues - issues in baseline that are NOT in current + if [[ -f "${baseline_file}" && -s "${baseline_file}" ]]; then + while IFS= read -r baseline_line; do + [[ -z "${baseline_line}" ]] && continue + + local file + file=$(echo "${baseline_line}" | cut -d: -f1) + + # Only check files that were changed + if [[ ${#changed_files[@]} -gt 0 ]]; then + local file_changed=false + for changed_file in "${changed_files[@]}"; do + if [[ "${file}" == "${changed_file}" ]]; then + file_changed=true + break + fi + done + + # Skip if file wasn't changed + if ! $file_changed; then + continue + fi + fi + + # Extract file:linter:message from baseline + local baseline_file_linter_msg + baseline_file_linter_msg=$(echo "${baseline_line}" | cut -d: -f1,4,5-) + + # Check if this issue still exists in current + local still_exists=false + if [[ -f "${current_file}" ]]; then + while IFS= read -r current_line; do + [[ -z "${current_line}" ]] && continue + local current_file_linter_msg + current_file_linter_msg=$(echo "${current_line}" | cut -d: -f1,4,5-) + if [[ "${baseline_file_linter_msg}" == "${current_file_linter_msg}" ]]; then + still_exists=true + break + fi + done < "${current_file}" + fi + + # If issue doesn't exist in current, it was fixed + if ! 
$still_exists; then + echo "${baseline_line}" >> "${fixed_issues_file}" + fi + done < "${baseline_file}" + fi +} + +# Output issue (basic format) +output_issue() { + echo "$1" +} + +# Generate basic report +generate_report() { + local new_issues_file="$1" + local preexisting_issues_file="$2" + local fixed_issues_file="$3" + local baseline_file="$4" + + local new_count=0 + local preexisting_count=0 + local fixed_count=0 + local baseline_count=0 + + [[ -f "${new_issues_file}" ]] && new_count=$(wc -l < "${new_issues_file}" | tr -d ' ') + [[ -f "${preexisting_issues_file}" ]] && preexisting_count=$(wc -l < "${preexisting_issues_file}" | tr -d ' ') + [[ -f "${fixed_issues_file}" ]] && fixed_count=$(wc -l < "${fixed_issues_file}" | tr -d ' ') + [[ -f "${baseline_file}" ]] && baseline_count=$(wc -l < "${baseline_file}" | tr -d ' ') + + local current_total=$((new_count + preexisting_count)) + + # Summary header + echo "API Lint Diff Results" + echo "=====================" + echo "Baseline (${BASELINE_BRANCH}): ${baseline_count} issues" + echo "Current branch: ${current_total} issues" + echo "" + echo "FIXED: ${fixed_count}" + echo "NEW: ${new_count}" + echo "PRE-EXISTING: ${preexisting_count}" + echo "" + + # Show FIXED issues + if [[ ${fixed_count} -gt 0 ]]; then + echo "=== FIXED ISSUES ===" + while IFS= read -r line; do + output_issue "${line}" + done < "${fixed_issues_file}" + echo "" + fi + + # Show NEW issues + if [[ ${new_count} -gt 0 ]]; then + echo "=== NEW ISSUES ===" + while IFS= read -r line; do + output_issue "${line}" + done < "${new_issues_file}" + echo "" + fi + + # Show PRE-EXISTING issues + if [[ ${preexisting_count} -gt 0 ]]; then + echo "=== PRE-EXISTING ISSUES ===" + while IFS= read -r line; do + output_issue "${line}" + done < "${preexisting_issues_file}" + echo "" + fi + + # Exit based on NEW issues count + if [[ ${new_count} -eq 0 ]]; then + if [[ ${fixed_count} -gt 0 ]]; then + echo -e "${GREEN}SUCCESS: Fixed ${fixed_count} issue(s), no new issues introduced.${NC}" + else + echo -e "${GREEN}NO NEW ISSUES found. Lint check passed.${NC}" + fi + if [[ ${preexisting_count} -gt 0 ]]; then + echo -e "${YELLOW}WARNING: ${preexisting_count} pre-existing issue(s) remain. Please address them separately.${NC}" + fi + return 0 + else + echo -e "${RED}FAILED: ${new_count} new issue(s) introduced${NC}" + return 1 + fi +} + +# Main execution +main() { + # Find golangci-lint + BASE_LINTER_PATH=$(find_golangci_lint) + + # Build custom linter with kube-api-linter plugin + LINTER_PATH="${BASE_LINTER_PATH}" + if CUSTOM_LINTER=$(build_custom_linter "${BASE_LINTER_PATH}"); then + LINTER_PATH="${CUSTOM_LINTER}" + fi + + # Convert to absolute path if needed + if [[ "${LINTER_PATH}" != /* ]]; then + LINTER_PATH="$(pwd)/${LINTER_PATH}" + fi + + # Create temporary config + create_temp_config + + # Get changed files + get_changed_files > "${TEMP_DIR}/changed_files.txt" + + # Run linter on current branch + REPO_ROOT="$(pwd)" + run_linter "${TEMP_DIR}/.golangci.yaml" "${TEMP_DIR}/current_output.txt" "${LINTER_PATH}" "${REPO_ROOT}" + parse_linter_output "${TEMP_DIR}/current_output.txt" "${TEMP_DIR}/current_parsed.txt" + + # Run linter on baseline + WORKTREE_DIR="${TEMP_DIR}/baseline_worktree" + if ! 
git worktree add --detach "${WORKTREE_DIR}" "${BASELINE_BRANCH}" 2>&1; then + echo -e "${RED}Error: Failed to create git worktree for baseline branch '${BASELINE_BRANCH}'${NC}" >&2 + echo -e "${RED}Please ensure the branch exists and try again.${NC}" >&2 + exit 1 + fi + run_linter "${TEMP_DIR}/.golangci.yaml" "${TEMP_DIR}/baseline_output.txt" "${LINTER_PATH}" "${WORKTREE_DIR}" + parse_linter_output "${TEMP_DIR}/baseline_output.txt" "${TEMP_DIR}/baseline_parsed.txt" + # Worktree cleanup is handled by the cleanup trap + + # Categorize issues + touch "${TEMP_DIR}/new_issues.txt" + touch "${TEMP_DIR}/preexisting_issues.txt" + touch "${TEMP_DIR}/fixed_issues.txt" + + categorize_issues \ + "${TEMP_DIR}/current_parsed.txt" \ + "${TEMP_DIR}/baseline_parsed.txt" \ + "${TEMP_DIR}/changed_files.txt" \ + "${TEMP_DIR}/new_issues.txt" \ + "${TEMP_DIR}/preexisting_issues.txt" \ + "${TEMP_DIR}/fixed_issues.txt" + + # Generate report + generate_report \ + "${TEMP_DIR}/new_issues.txt" \ + "${TEMP_DIR}/preexisting_issues.txt" \ + "${TEMP_DIR}/fixed_issues.txt" \ + "${TEMP_DIR}/baseline_parsed.txt" + + return $? +} + +# Run main function +main "$@" diff --git a/hack/ci/custom-linters/analyzers/testdata/go.mod b/hack/ci/custom-linters/analyzers/testdata/go.mod index 23875e233..6a5571ff3 100644 --- a/hack/ci/custom-linters/analyzers/testdata/go.mod +++ b/hack/ci/custom-linters/analyzers/testdata/go.mod @@ -1,5 +1,5 @@ module testdata -go 1.24.3 +go 1.25.3 require github.com/go-logr/logr v1.4.3 diff --git a/hack/conftest/policy/README.md b/hack/conftest/policy/README.md new file mode 100644 index 000000000..ff1a16bc6 --- /dev/null +++ b/hack/conftest/policy/README.md @@ -0,0 +1,70 @@ +# OPA Policies for NetworkPolicy Validation + +This directory contains [Open Policy Agent (OPA)](https://www.openpolicyagent.org/) Rego policies used by [conftest](https://www.conftest.dev/) to validate generated Kubernetes manifests. 
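+
+For orientation, the deny-all check in `olm-networkpolicies.rego` looks for a manifest shaped like the following (an illustrative sketch; the name and namespace are placeholders, not the actual chart output):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: default-deny-all    # placeholder name
+  namespace: olmv1-system   # placeholder namespace
+spec:
+  podSelector: {}           # empty selector: applies to all pods
+  policyTypes:
+    - Ingress
+    - Egress
+  # no ingress/egress rules defined, so all traffic is denied by default
+```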
+
+## Policy Files
+
+### olm-networkpolicies.rego
+
+Package: `main`
+
+Validates core OLM NetworkPolicy requirements:
+
+- **Deny-all policy**: Ensures a default deny-all NetworkPolicy exists with empty podSelector and both Ingress/Egress policy types
+- **catalogd-controller-manager policy**: Validates the NetworkPolicy for catalogd:
+  - Ingress on port 7443 (Prometheus metrics scraping)
+  - Ingress on port 8443 (catalog metadata queries from operator-controller)
+  - Ingress on port 9443 (Kubernetes API server webhook access)
+  - General egress enabled
+- **operator-controller-controller-manager policy**: Validates the NetworkPolicy for operator-controller:
+  - Ingress on port 8443 (Prometheus metrics scraping)
+  - General egress enabled (for pulling bundle images, connecting to catalogd, and reaching the Kubernetes API)
+
+### prometheus-networkpolicies.rego
+
+Package: `prometheus`
+
+Validates Prometheus NetworkPolicy requirements:
+
+- Ensures a NetworkPolicy exists that allows both ingress and egress traffic for prometheus pods
+
+## Usage
+
+These policies are automatically run as part of:
+
+- `make lint-helm` - Validates both helm/olmv1 and helm/prometheus charts (runs `main` and `prometheus` packages)
+- `make manifests` - Generates and validates core OLM manifests using only `main` package policies
+  (Prometheus policies are intentionally skipped here, even if manifests include Prometheus resources;
+  they are validated via `make lint-helm`)
+
+### Running manually
+
+```bash
+# Run all policies (main + prometheus namespaces)
+(helm template olmv1 helm/olmv1; helm template prometheus helm/prometheus) | conftest test --policy hack/conftest/policy/ --combine -n main -n prometheus -
+
+# Run only OLM policies
+helm template olmv1 helm/olmv1 | conftest test --policy hack/conftest/policy/ --combine -n main -
+
+# Run only prometheus policies
+helm template prometheus helm/prometheus | conftest test --policy hack/conftest/policy/ --combine -n prometheus -
+```
+
+### Excluding policies
+
+Use the `-n` (namespace) flag to select which policy packages run; conftest still needs the rendered manifests on stdin:
+
+```bash
+# Skip prometheus policies
+helm template olmv1 helm/olmv1 | conftest test --policy hack/conftest/policy/ --combine -n main -
+
+# Skip OLM policies
+helm template prometheus helm/prometheus | conftest test --policy hack/conftest/policy/ --combine -n prometheus -
+```
+
+## Adding New Policies
+
+1. Add new rules to an existing `.rego` file or create a new one
+2. Use `package main` for policies that should run by default on all manifests
+3. Use a custom package name (e.g., `package prometheus`) for optional policies
+4.
Update the Makefile targets if new namespaces need to be included
diff --git a/hack/conftest/policy/olm-networkpolicies.rego b/hack/conftest/policy/olm-networkpolicies.rego
new file mode 100644
index 000000000..493a4fdd6
--- /dev/null
+++ b/hack/conftest/policy/olm-networkpolicies.rego
@@ -0,0 +1,160 @@
+package main
+
+import rego.v1
+
+# Check that a deny-all NetworkPolicy exists
+# A deny-all policy has:
+# - podSelector: {} (empty, applies to all pods)
+# - policyTypes containing both "Ingress" and "Egress"
+# - No ingress or egress rules defined
+
+is_deny_all(policy) if {
+    policy.kind == "NetworkPolicy"
+    policy.apiVersion == "networking.k8s.io/v1"
+
+    # podSelector must be empty (applies to all pods)
+    count(policy.spec.podSelector) == 0
+
+    # Must have both Ingress and Egress policy types
+    policy_types := {t | some t in policy.spec.policyTypes}
+    policy_types["Ingress"]
+    policy_types["Egress"]
+
+    # Must not have any ingress rules
+    not policy.spec.ingress
+
+    # Must not have any egress rules
+    not policy.spec.egress
+}
+
+has_deny_all_policy if {
+    some i in numbers.range(0, count(input) - 1)
+    is_deny_all(input[i].contents)
+}
+
+deny contains msg if {
+    not has_deny_all_policy
+    msg := "No deny-all NetworkPolicy found. A NetworkPolicy with empty podSelector, policyTypes [Ingress, Egress], and no ingress/egress rules is required."
+}
+
+# Check that a NetworkPolicy exists for catalogd-controller-manager that:
+# - Allows ingress on TCP ports 7443, 8443, 9443
+# - Allows general egress traffic
+
+is_catalogd_policy(policy) if {
+    policy.kind == "NetworkPolicy"
+    policy.apiVersion == "networking.k8s.io/v1"
+    policy.spec.podSelector.matchLabels["control-plane"] == "catalogd-controller-manager"
+}
+
+catalogd_policies contains policy if {
+    some i in numbers.range(0, count(input) - 1)
+    policy := input[i].contents
+    is_catalogd_policy(policy)
+}
+
+catalogd_ingress_ports contains port if {
+    some policy in catalogd_policies
+    some rule in policy.spec.ingress
+    some port in rule.ports
+    port.protocol == "TCP"
+}
+
+catalogd_ingress_port_numbers contains num if {
+    some port in catalogd_ingress_ports
+    num := port.port
+}
+
+catalogd_has_egress if {
+    some policy in catalogd_policies
+    policy.spec.egress
+}
+
+deny contains msg if {
+    count(catalogd_policies) == 0
+    msg := "No NetworkPolicy found for catalogd-controller-manager. A NetworkPolicy allowing ingress on TCP ports 7443, 8443, 9443 and general egress is required."
+}
+
+deny contains msg if {
+    count(catalogd_policies) > 1
+    msg := sprintf("Expected exactly 1 NetworkPolicy for catalogd-controller-manager, found %d.", [count(catalogd_policies)])
+}
+
+deny contains msg if {
+    count(catalogd_policies) == 1
+    not catalogd_ingress_port_numbers[7443]
+    msg := "Allow traffic to port 7443. Permit Prometheus to scrape metrics from catalogd, which is essential for monitoring its performance and health."
+}
+
+deny contains msg if {
+    count(catalogd_policies) == 1
+    not catalogd_ingress_port_numbers[8443]
+    msg := "Allow traffic to port 8443. Permit clients (e.g. operator-controller) to query catalog metadata from catalogd, which is a core function for bundle resolution and operator discovery."
+}
+
+deny contains msg if {
+    count(catalogd_policies) == 1
+    not catalogd_ingress_port_numbers[9443]
+    msg := "Allow traffic to port 9443. Permit Kubernetes API server to reach catalogd's mutating admission webhook, ensuring integrity of catalog resources."
+}
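+
+# NOTE (illustrative): with `conftest test --combine`, the input document is an array
+# with one element per parsed manifest, each exposing the manifest itself under
+# `contents` (alongside metadata such as the source file path). That is why the rules
+# in this file iterate with `some i in numbers.range(0, count(input) - 1)` and match
+# on `input[i].contents` instead of inspecting `input` directly.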
+
+deny contains msg if {
+    count(catalogd_policies) == 1
+    not catalogd_has_egress
+    msg := "Missing egress rules in catalogd-controller-manager NetworkPolicy. General egress is required to enable catalogd to pull catalog images from arbitrary image registries, and interact with the Kubernetes API server."
+}
+
+# Check that a NetworkPolicy exists for operator-controller-controller-manager that:
+# - Allows ingress on TCP port 8443
+# - Allows general egress traffic
+
+is_operator_controller_policy(policy) if {
+    policy.kind == "NetworkPolicy"
+    policy.apiVersion == "networking.k8s.io/v1"
+    policy.spec.podSelector.matchLabels["control-plane"] == "operator-controller-controller-manager"
+}
+
+operator_controller_policies contains policy if {
+    some i in numbers.range(0, count(input) - 1)
+    policy := input[i].contents
+    is_operator_controller_policy(policy)
+}
+
+operator_controller_ingress_ports contains port if {
+    some policy in operator_controller_policies
+    some rule in policy.spec.ingress
+    some port in rule.ports
+    port.protocol == "TCP"
+}
+
+operator_controller_ingress_port_numbers contains num if {
+    some port in operator_controller_ingress_ports
+    num := port.port
+}
+
+operator_controller_has_egress if {
+    some policy in operator_controller_policies
+    policy.spec.egress
+}
+
+deny contains msg if {
+    count(operator_controller_policies) == 0
+    msg := "No NetworkPolicy found for operator-controller-controller-manager. A NetworkPolicy allowing ingress on TCP port 8443 and general egress is required."
+}
+
+deny contains msg if {
+    count(operator_controller_policies) > 1
+    msg := sprintf("Expected exactly 1 NetworkPolicy for operator-controller-controller-manager, found %d.", [count(operator_controller_policies)])
+}
+
+deny contains msg if {
+    count(operator_controller_policies) == 1
+    not operator_controller_ingress_port_numbers[8443]
+    msg := "Allow traffic to port 8443. Permit Prometheus to scrape metrics from operator-controller, which is essential for monitoring its performance and health."
+}
+
+deny contains msg if {
+    count(operator_controller_policies) == 1
+    not operator_controller_has_egress
+    msg := "Missing egress rules in operator-controller-controller-manager NetworkPolicy. General egress is required to enable operator-controller to pull bundle images from arbitrary image registries, connect to catalogd's HTTPS server for metadata, and interact with the Kubernetes API server."
+} diff --git a/hack/conftest/policy/prometheus-networkpolicies.rego b/hack/conftest/policy/prometheus-networkpolicies.rego new file mode 100644 index 000000000..c37158250 --- /dev/null +++ b/hack/conftest/policy/prometheus-networkpolicies.rego @@ -0,0 +1,33 @@ +package prometheus + +import rego.v1 + +# Check that a NetworkPolicy exists that allows both ingress and egress traffic to prometheus pods +is_prometheus_policy(policy) if { + policy.kind == "NetworkPolicy" + policy.apiVersion == "networking.k8s.io/v1" + + # Must target prometheus pods + policy.spec.podSelector.matchLabels["app.kubernetes.io/name"] == "prometheus" + + # Must have both Ingress and Egress policy types + policy_types := {t | some t in policy.spec.policyTypes} + policy_types["Ingress"] + policy_types["Egress"] + + # Must have ingress rules defined (allowing traffic) + policy.spec.ingress + + # Must have egress rules defined (allowing traffic) + policy.spec.egress +} + +has_prometheus_policy if { + some i in numbers.range(0, count(input) - 1) + is_prometheus_policy(input[i].contents) +} + +deny contains msg if { + not has_prometheus_policy + msg := "No NetworkPolicy found that allows both ingress and egress traffic to prometheus pods. A NetworkPolicy targeting prometheus pods with ingress and egress rules is required." +} diff --git a/hack/demo/own-namespace-demo-script.sh b/hack/demo/own-namespace-demo-script.sh index 611c6dfb0..86b3d2876 100755 --- a/hack/demo/own-namespace-demo-script.sh +++ b/hack/demo/own-namespace-demo-script.sh @@ -6,16 +6,14 @@ set -e trap 'echo "Demo ran into error"; trap - SIGTERM && kill -- -$$; exit 1' ERR SIGINT SIGTERM EXIT -# install experimental CRDs with config field support -kubectl apply -f "$(dirname "${BASH_SOURCE[0]}")/../../manifests/experimental.yaml" +# install standard CRDs +echo "Install standard CRDs..." +kubectl apply -f "$(dirname "${BASH_SOURCE[0]}")/../../manifests/standard.yaml" -# wait for experimental CRDs to be available +# wait for standard CRDs to be available kubectl wait --for condition=established --timeout=60s crd/clusterextensions.olm.operatorframework.io -# enable 'SingleOwnNamespaceInstallSupport' feature gate -kubectl patch deployment -n olmv1-system operator-controller-controller-manager --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--feature-gates=SingleOwnNamespaceInstallSupport=true"}]' - -# wait for operator-controller to become available +# Ensure controller is healthy kubectl rollout status -n olmv1-system deployment/operator-controller-controller-manager # create install namespace @@ -57,17 +55,6 @@ kubectl delete clusterextension argocd-operator --ignore-not-found=true kubectl delete namespace argocd-system --ignore-not-found=true kubectl delete clusterrolebinding argocd-installer-crb --ignore-not-found=true -# remove feature gate from deployment -echo "Removing feature gate from operator-controller..." -kubectl patch deployment -n olmv1-system operator-controller-controller-manager --type='json' -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/args", "value": "--feature-gates=SingleOwnNamespaceInstallSupport=true"}]' || true - -# restore standard CRDs -echo "Restoring standard CRDs..." 
-kubectl apply -f "$(dirname "${BASH_SOURCE[0]}")/../../manifests/base.yaml" - -# wait for standard CRDs to be available -kubectl wait --for condition=established --timeout=60s crd/clusterextensions.olm.operatorframework.io - # wait for operator-controller to become available with standard config kubectl rollout status -n olmv1-system deployment/operator-controller-controller-manager diff --git a/hack/demo/single-namespace-demo-script.sh b/hack/demo/single-namespace-demo-script.sh index 970268415..885854dd9 100755 --- a/hack/demo/single-namespace-demo-script.sh +++ b/hack/demo/single-namespace-demo-script.sh @@ -6,16 +6,14 @@ set -e trap 'echo "Demo ran into error"; trap - SIGTERM && kill -- -$$; exit 1' ERR SIGINT SIGTERM EXIT -# install experimental CRDs with config field support -kubectl apply -f "$(dirname "${BASH_SOURCE[0]}")/../../manifests/experimental.yaml" +# install standard CRDs +echo "Install standard CRDs..." +kubectl apply -f "$(dirname "${BASH_SOURCE[0]}")/../../manifests/standard.yaml" -# wait for experimental CRDs to be available +# wait for standard CRDs to be available kubectl wait --for condition=established --timeout=60s crd/clusterextensions.olm.operatorframework.io -# enable 'SingleOwnNamespaceInstallSupport' feature gate -kubectl patch deployment -n olmv1-system operator-controller-controller-manager --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--feature-gates=SingleOwnNamespaceInstallSupport=true"}]' - -# wait for operator-controller to become available +# Ensure controller is healthy kubectl rollout status -n olmv1-system deployment/operator-controller-controller-manager # create install namespace @@ -60,17 +58,6 @@ kubectl delete clusterextension argocd-operator --ignore-not-found=true kubectl delete namespace argocd-system argocd --ignore-not-found=true kubectl delete clusterrolebinding argocd-installer-crb --ignore-not-found=true -# remove feature gate from deployment -echo "Removing feature gate from operator-controller..." -kubectl patch deployment -n olmv1-system operator-controller-controller-manager --type='json' -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/args", "value": "--feature-gates=SingleOwnNamespaceInstallSupport=true"}]' || true - -# restore standard CRDs -echo "Restoring standard CRDs..." -kubectl apply -f "$(dirname "${BASH_SOURCE[0]}")/../../manifests/base.yaml" - -# wait for standard CRDs to be available -kubectl wait --for condition=established --timeout=60s crd/clusterextensions.olm.operatorframework.io - # wait for operator-controller to become available with standard config kubectl rollout status -n olmv1-system deployment/operator-controller-controller-manager diff --git a/hack/kind-config/containerd/certs.d/go.mod b/hack/kind-config/containerd/certs.d/go.mod index adbe39415..fa9ef1076 100644 --- a/hack/kind-config/containerd/certs.d/go.mod +++ b/hack/kind-config/containerd/certs.d/go.mod @@ -1,6 +1,6 @@ module hack-cert.d -go 1.24.6 +go 1.25.3 // This file is present in the certs.d directory to ensure that // certs.d/host:port directories are not included in the main go diff --git a/hack/tools/check-go-version.sh b/hack/tools/check-go-version.sh index 59eb8b971..fb9bc8fec 100755 --- a/hack/tools/check-go-version.sh +++ b/hack/tools/check-go-version.sh @@ -46,7 +46,6 @@ fi GO_MAJOR=${MAX_VER[0]} GO_MINOR=${MAX_VER[1]} GO_PATCH=${MAX_VER[2]} -OVERRIDE_LABEL="override-go-verdiff" RETCODE=0 @@ -118,19 +117,4 @@ for f in $(find . 
-name "*.mod"); do fi done -for l in ${LABELS}; do - if [ "$l" == "${OVERRIDE_LABEL}" ]; then - if [ ${RETCODE} -eq 1 ]; then - echo "" - echo "Found ${OVERRIDE_LABEL} label, overriding failed results." - RETCODE=0 - fi - fi -done - -if [ ${RETCODE} -eq 1 ]; then - echo "" - echo "This test result may be overridden by applying the (${OVERRIDE_LABEL}) label to this PR and re-running the CI job." -fi - exit ${RETCODE} diff --git a/hack/tools/crd-generator/main_test.go b/hack/tools/crd-generator/main_test.go index 99f71a497..4987b4324 100644 --- a/hack/tools/crd-generator/main_test.go +++ b/hack/tools/crd-generator/main_test.go @@ -11,7 +11,7 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) -const controllerToolsVersion = "v0.19.0" +const controllerToolsVersion = "v0.20.0" func TestRunGenerator(t *testing.T) { here, err := os.Getwd() diff --git a/hack/tools/crd-generator/testdata/output/experimental/olm.operatorframework.io_clusterextensions.yaml b/hack/tools/crd-generator/testdata/output/experimental/olm.operatorframework.io_clusterextensions.yaml index 9866f68bb..3ee03a42f 100644 --- a/hack/tools/crd-generator/testdata/output/experimental/olm.operatorframework.io_clusterextensions.yaml +++ b/hack/tools/crd-generator/testdata/output/experimental/olm.operatorframework.io_clusterextensions.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clusterextensions.olm.operatorframework.io spec: diff --git a/hack/tools/crd-generator/testdata/output/standard/olm.operatorframework.io_clusterextensions.yaml b/hack/tools/crd-generator/testdata/output/standard/olm.operatorframework.io_clusterextensions.yaml index 8dcb4beed..161f1b383 100644 --- a/hack/tools/crd-generator/testdata/output/standard/olm.operatorframework.io_clusterextensions.yaml +++ b/hack/tools/crd-generator/testdata/output/standard/olm.operatorframework.io_clusterextensions.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: standard name: clusterextensions.olm.operatorframework.io spec: diff --git a/hack/tools/test-profiling/go.mod b/hack/tools/test-profiling/go.mod index df225c427..11c55a0d8 100644 --- a/hack/tools/test-profiling/go.mod +++ b/hack/tools/test-profiling/go.mod @@ -1,6 +1,6 @@ module github.com/operator-framework/operator-controller/hack/tools/test-profiling -go 1.24.6 +go 1.25.3 require ( github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d diff --git a/helm/experimental.yaml b/helm/experimental.yaml index b14b1b303..1d2761714 100644 --- a/helm/experimental.yaml +++ b/helm/experimental.yaml @@ -9,7 +9,6 @@ options: operatorController: features: enabled: - - SingleOwnNamespaceInstallSupport - PreflightPermissions - HelmChartSupport - BoxcutterRuntime diff --git a/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml b/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml index c78a57b92..39d6e906f 100644 --- a/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml +++ b/helm/olmv1/base/catalogd/crd/experimental/olm.operatorframework.io_clustercatalogs.yaml @@ -3,7 +3,7 @@ 
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clustercatalogs.olm.operatorframework.io spec: @@ -29,7 +29,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -51,29 +51,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. enum: - Unavailable - Available @@ -81,35 +76,33 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. 
- When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. format: int32 + maximum: 2147483647 + minimum: -2147483648 type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -120,25 +113,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. 
The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -221,12 +212,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -244,31 +234,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. 
+ - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -329,11 +318,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -342,14 +329,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -383,12 +370,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -407,19 +393,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. 
- It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: diff --git a/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml b/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml index 94f1d7121..1a97fdfe0 100644 --- a/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml +++ b/helm/olmv1/base/catalogd/crd/standard/olm.operatorframework.io_clustercatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: standard name: clustercatalogs.olm.operatorframework.io spec: @@ -29,7 +29,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -51,29 +51,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. 
- Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. enum: - Unavailable - Available @@ -81,35 +76,33 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. format: int32 + maximum: 2147483647 + minimum: -2147483648 type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -120,25 +113,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. 
+ It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -221,12 +212,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -244,31 +234,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. 
+ - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -329,11 +318,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -342,14 +329,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. 
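The scenario described in the conditions above, Serving and Progressing active at the same time, renders in status roughly as follows; this sketch uses only the condition types and reasons named in these descriptions, with the other condition fields omitted:

  status:
    conditions:
      - type: Serving
        status: "True"
        reason: Available        # previously unpacked contents are still being served
      - type: Progressing
        status: "True"
        reason: Retrying         # polling found newer contents; a new unpack is in progress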
properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -383,12 +370,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -407,19 +393,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. 
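Since clients append the endpoint path themselves, the full URL for the v1 catalog contents is <base>/api/v1/all. A hedged sketch with a placeholder base value (the real value is whatever the controller publishes in status.urls.base):

  urls:
    base: https://catalog-server.example.svc/catalogs/example-catalog   # placeholder cluster-internal URL
    # clients fetch <base>/api/v1/all to retrieve the entire FBC contents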
maxLength: 525 type: string x-kubernetes-validations: diff --git a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml index c448a4da4..1e435dc70 100644 --- a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml +++ b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clusterextensionrevisions.olm.operatorframework.io spec: diff --git a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml index 0d1bbd71c..7194392b6 100644 --- a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml +++ b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clusterextensions.olm.operatorframework.io spec: @@ -59,9 +59,9 @@ spec: properties: config: description: |- - config is an optional field used to specify bundle specific configuration - used to configure the bundle. Configuration is bundle specific and a bundle may provide - a configuration schema. When not specified, the default configuration of the resolved bundle will be used. + config is optional and specifies bundle-specific configuration. + Configuration is bundle-specific and a bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide a configuration schema the bundle is deemed to not be configurable. More information on how @@ -69,21 +69,19 @@ spec: properties: configType: description: |- - configType is a required reference to the type of configuration source. + configType is required and specifies the type of configuration source. - Allowed values are "Inline" + The only allowed value is "Inline". - When this field is set to "Inline", the cluster extension configuration is defined inline within the - ClusterExtension resource. + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. enum: - Inline type: string inline: description: |- - inline contains JSON or YAML values specified directly in the - ClusterExtension. + inline contains JSON or YAML values specified directly in the ClusterExtension. - inline is used to specify arbitrary configuration values for the ClusterExtension. + It is used to specify arbitrary configuration values for the ClusterExtension. It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. The configuration values are validated at runtime against a JSON schema provided by the bundle. 
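A minimal sketch of this inline configuration shape; configType and inline are the fields defined here, while the replicas property is a hypothetical key that would have to be accepted by the resolved bundle's own schema:

  config:
    configType: Inline
    inline:
      replicas: 2   # hypothetical property; valid keys come from the bundle-provided schema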
minProperties: 1 @@ -99,37 +97,35 @@ spec: : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -151,16 +147,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. 
+ It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -172,24 +167,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -218,11 +211,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -233,30 +226,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. 
It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -286,13 +278,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -319,12 +310,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -372,35 +360,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. 
+ upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -478,13 +465,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -590,11 +576,13 @@ spec: x-kubernetes-list-type: map conditions: description: |- + conditions represents the current state of the ClusterExtension. + The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. 
- When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. @@ -603,12 +591,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -673,17 +661,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -693,8 +680,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. 
+ version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml b/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml index a0983e41f..ca316f7e8 100644 --- a/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml +++ b/helm/olmv1/base/operator-controller/crd/standard/olm.operatorframework.io_clusterextensions.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: standard name: clusterextensions.olm.operatorframework.io spec: @@ -57,39 +57,75 @@ spec: description: spec is an optional field that defines the desired state of the ClusterExtension. properties: + config: + description: |- + config is optional and specifies bundle-specific configuration. + Configuration is bundle-specific and a bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. + + config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide + a configuration schema the bundle is deemed to not be configurable. More information on how + to configure bundles can be found in the OLM documentation associated with your current OLM version. + properties: + configType: + description: |- + configType is required and specifies the type of configuration source. + + The only allowed value is "Inline". + + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. + enum: + - Inline + type: string + inline: + description: |- + inline contains JSON or YAML values specified directly in the ClusterExtension. + + It is used to specify arbitrary configuration values for the ClusterExtension. + It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. + The configuration values are validated at runtime against a JSON schema provided by the bundle. + minProperties: 1 + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - configType + type: object + x-kubernetes-validations: + - message: inline is required when configType is Inline, and forbidden + otherwise + rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline) + : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. 
- When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -111,16 +147,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -132,24 +167,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. 
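Assembling the namespace, serviceAccount, and preflight fields described above into one spec fragment; the names are hypothetical, and None is shown only to illustrate the non-default enforcement value:

  namespace: example-namespace           # must already exist; DNS label, at most 63 characters
  serviceAccount:
    name: example-installer              # must exist in the namespace above
  install:
    preflight:
      crdUpgradeSafety:
        enforcement: None                # skips the check; the default, Strict, runs it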
properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -178,11 +211,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -193,30 +226,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. 
For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -246,13 +278,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -279,12 +310,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -332,35 +360,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. 
+ When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -438,13 +465,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -468,23 +494,25 @@ spec: properties: conditions: description: |- + conditions represents the current state of the ClusterExtension. + The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. 
- These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -549,17 +577,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -569,8 +596,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. 
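The channel-and-version interaction worked through above maps onto spec.source as follows; some-package is taken from the valid-value examples in these descriptions, and the remaining values are the ones used in the explanation itself:

  source:
    sourceType: Catalog
    catalog:
      packageName: some-package
      channels:
        - foo                                    # restricts bundles and upgrade edges to channel "foo"
      version: ">=1.0.0, <1.5.0"                 # ANDed with the channel constraint
      upgradeConstraintPolicy: CatalogProvided   # the default; honors catalog-defined upgrade edges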
type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml b/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml index d6fec9b5f..44c5bdea2 100644 --- a/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml +++ b/helm/olmv1/templates/e2e/configmap-olmv1-system-e2e-registries-conf.yml @@ -5,6 +5,10 @@ data: [[registry]] prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" + + [[registry]] + prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" + location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" kind: ConfigMap metadata: annotations: diff --git a/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml b/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml index fa4b11aca..ce1ff3c41 100644 --- a/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml +++ b/helm/olmv1/templates/e2e/pod-olmv1-system-e2e-coverage-copy-pod.yml @@ -17,6 +17,7 @@ spec: image: busybox:1.36 name: tar securityContext: + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: diff --git a/helm/tilt.yaml b/helm/tilt.yaml index aaed7c71f..0fe3bec1f 100644 --- a/helm/tilt.yaml +++ b/helm/tilt.yaml @@ -14,7 +14,6 @@ options: operatorController: features: enabled: - - SingleOwnNamespaceInstallSupport - PreflightPermissions - HelmChartSupport disabled: diff --git a/internal/operator-controller/OWNERS b/internal/operator-controller/OWNERS deleted file mode 100644 index 3174715ba..000000000 --- a/internal/operator-controller/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -approvers: - - operator-controller-approvers diff --git a/internal/operator-controller/controllers/clusterextensionrevision_controller.go b/internal/operator-controller/controllers/clusterextensionrevision_controller.go index f9c11c2ad..a7da574d6 100644 --- a/internal/operator-controller/controllers/clusterextensionrevision_controller.go +++ b/internal/operator-controller/controllers/clusterextensionrevision_controller.go @@ -178,7 +178,7 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev } } - if !rres.InTransistion() { + if !rres.InTransition() { markAsProgressing(rev, ocv1.ReasonSucceeded, fmt.Sprintf("Revision %s has rolled out.", revVersion)) } else { markAsProgressing(rev, ocv1.ReasonRollingOut, fmt.Sprintf("Revision %s is rolling out.", revVersion)) @@ -222,8 +222,8 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev for _, ores := range pres.GetObjects() { // we probably want an AvailabilityProbeType and run through all of them independently of whether // the revision is complete or not - pr := ores.Probes()[boxcutter.ProgressProbeType] - if pr.Success { + pr := ores.ProbeResults()[boxcutter.ProgressProbeType] + if pr.Status == machinerytypes.ProbeStatusTrue { continue } diff --git a/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go b/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go index e54827962..8a1c52513 100644 --- a/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go +++ b/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go @@ -152,9 +152,9 @@ func 
Test_ClusterExtensionRevisionReconciler_Reconcile_RevisionReconciliation(t objects: []machinery.ObjectResult{ mockObjectResult{ success: true, - probes: map[string]machinery.ObjectProbeResult{ + probes: machinerytypes.ProbeResultContainer{ boxcutter.ProgressProbeType: { - Success: true, + Status: machinerytypes.ProbeStatusTrue, }, }, }, @@ -170,9 +170,9 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_RevisionReconciliation(t obj.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) return obj }(), - probes: map[string]machinery.ObjectProbeResult{ + probes: machinerytypes.ProbeResultContainer{ boxcutter.ProgressProbeType: { - Success: false, + Status: machinerytypes.ProbeStatusFalse, Messages: []string{ "something bad happened", "something worse happened", @@ -198,9 +198,9 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_RevisionReconciliation(t obj.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) return obj }(), - probes: map[string]machinery.ObjectProbeResult{ + probes: machinerytypes.ProbeResultContainer{ boxcutter.ProgressProbeType: { - Success: false, + Status: machinerytypes.ProbeStatusFalse, Messages: []string{ "we have a problem", }, @@ -243,9 +243,9 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_RevisionReconciliation(t objects: []machinery.ObjectResult{ mockObjectResult{ success: true, - probes: map[string]machinery.ObjectProbeResult{ - boxcutter.ProgressProbeType: { - Success: true, + probes: machinerytypes.ProbeResultContainer{ + boxcutter.ProgressProbeType: machinerytypes.ProbeResult{ + Status: machinerytypes.ProbeStatusTrue, }, }, }, @@ -261,9 +261,9 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_RevisionReconciliation(t obj.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Service")) return obj }(), - probes: map[string]machinery.ObjectProbeResult{ - boxcutter.ProgressProbeType: { - Success: false, + probes: machinerytypes.ProbeResultContainer{ + boxcutter.ProgressProbeType: machinerytypes.ProbeResult{ + Status: machinerytypes.ProbeStatusFalse, Messages: []string{ "something bad happened", "something worse happened", @@ -289,9 +289,9 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_RevisionReconciliation(t obj.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) return obj }(), - probes: map[string]machinery.ObjectProbeResult{ - boxcutter.ProgressProbeType: { - Success: false, + probes: machinerytypes.ProbeResultContainer{ + boxcutter.ProgressProbeType: machinerytypes.ProbeResult{ + Status: machinerytypes.ProbeStatusFalse, Messages: []string{ "we have a problem", }, @@ -1018,7 +1018,7 @@ func (m mockRevisionResult) GetPhases() []machinery.PhaseResult { return m.phases } -func (m mockRevisionResult) InTransistion() bool { +func (m mockRevisionResult) InTransition() bool { return m.inTransition } @@ -1034,6 +1034,8 @@ func (m mockRevisionResult) String() string { return m.string } +var _ machinery.PhaseResult = &mockPhaseResult{} + type mockPhaseResult struct { name string validationError *validation.PhaseValidationError @@ -1056,7 +1058,7 @@ func (m mockPhaseResult) GetObjects() []machinery.ObjectResult { return m.objects } -func (m mockPhaseResult) InTransistion() bool { +func (m mockPhaseResult) InTransition() bool { return m.inTransition } @@ -1072,12 +1074,28 @@ func (m mockPhaseResult) String() string { return m.string } +var _ machinery.ObjectResult = &mockObjectResult{} + type mockObjectResult struct { - action machinery.Action - object machinery.Object - success 
bool - probes map[string]machinery.ObjectProbeResult - string string + action machinery.Action + object machinery.Object + success bool + complete bool + paused bool + probes machinerytypes.ProbeResultContainer + string string +} + +func (m mockObjectResult) ProbeResults() machinerytypes.ProbeResultContainer { + return m.probes +} + +func (m mockObjectResult) IsComplete() bool { + return m.complete +} + +func (m mockObjectResult) IsPaused() bool { + return m.paused } func (m mockObjectResult) Action() machinery.Action { @@ -1092,14 +1110,12 @@ func (m mockObjectResult) Success() bool { return m.success } -func (m mockObjectResult) Probes() map[string]machinery.ObjectProbeResult { - return m.probes -} - func (m mockObjectResult) String() string { return m.string } +var _ machinery.RevisionTeardownResult = mockRevisionTeardownResult{} + type mockRevisionTeardownResult struct { phases []machinery.PhaseTeardownResult isComplete bool diff --git a/internal/operator-controller/features/features.go b/internal/operator-controller/features/features.go index 1ad409e09..e0ddd6401 100644 --- a/internal/operator-controller/features/features.go +++ b/internal/operator-controller/features/features.go @@ -33,8 +33,8 @@ var operatorControllerFeatureGates = map[featuregate.Feature]featuregate.Feature // registry+v1 cluster extensions with single or own namespaces modes // i.e. with a single watch namespace. SingleOwnNamespaceInstallSupport: { - Default: false, - PreRelease: featuregate.Alpha, + Default: true, + PreRelease: featuregate.GA, LockToDefault: false, }, diff --git a/kind-config/kind-config-2node.yaml b/kind-config/kind-config-2node.yaml new file mode 100644 index 000000000..5532a9932 --- /dev/null +++ b/kind-config/kind-config-2node.yaml @@ -0,0 +1,45 @@ +apiVersion: kind.x-k8s.io/v1alpha4 +kind: Cluster +nodes: + - role: control-plane + extraPortMappings: + # e2e image registry service's NodePort + - containerPort: 30000 + hostPort: 30000 + listenAddress: "127.0.0.1" + protocol: tcp + # prometheus metrics service's NodePort + - containerPort: 30900 + hostPort: 30900 + listenAddress: "127.0.0.1" + protocol: tcp + kubeadmConfigPatches: + - | + kind: ClusterConfiguration + apiServer: + extraArgs: + enable-admission-plugins: OwnerReferencesPermissionEnforcement + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + taints: [] + extraMounts: + - hostPath: ./hack/kind-config/containerd/certs.d + containerPath: /etc/containerd/certs.d + - role: control-plane + kubeadmConfigPatches: + - | + kind: JoinConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + taints: [] + extraMounts: + - hostPath: ./hack/kind-config/containerd/certs.d + containerPath: /etc/containerd/certs.d +containerdConfigPatches: + - |- + [plugins."io.containerd.grpc.v1.cri".registry] + config_path = "/etc/containerd/certs.d" diff --git a/kind-config.yaml b/kind-config/kind-config.yaml similarity index 100% rename from kind-config.yaml rename to kind-config/kind-config.yaml diff --git a/manifests/experimental-e2e.yaml b/manifests/experimental-e2e.yaml index 1b583d207..8d56b5ad4 100644 --- a/manifests/experimental-e2e.yaml +++ b/manifests/experimental-e2e.yaml @@ -152,6 +152,10 @@ data: [[registry]] prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" + + [[registry]] + prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" + 
location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" kind: ConfigMap metadata: annotations: @@ -185,7 +189,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clustercatalogs.olm.operatorframework.io spec: @@ -211,7 +215,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -233,29 +237,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. enum: - Unavailable - Available @@ -263,35 +262,33 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. 
- It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. format: int32 + maximum: 2147483647 + minimum: -2147483648 type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -302,25 +299,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. 
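A sketch of the image source fields just described, with a hypothetical registry path. Because the ref below is tag-based, polling is allowed; with a digest-based ref, pollIntervalMinutes must be omitted:

source:
  type: Image
  image:
    ref: quay.io/example/catalog:v1.2.3   # hypothetical tag-based reference
    pollIntervalMinutes: 15               # forbidden if ref were digest-based (@sha256:...)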
- A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -403,12 +398,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -426,31 +420,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. 
+ The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -511,11 +504,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -524,14 +515,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -565,12 +556,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. 
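The dual-condition scenario described above might surface in status roughly as follows (a sketch; lastTransitionTime and message fields omitted for brevity):

status:
  conditions:
  - type: Serving
    status: "True"
    reason: Available    # previously unpacked contents are still being served
  - type: Progressing
    status: "True"
    reason: Retrying     # newer contents were detected and are being rolled out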
enum: - Image type: string @@ -589,19 +579,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: @@ -628,7 +615,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clusterextensionrevisions.olm.operatorframework.io spec: @@ -903,7 +890,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clusterextensions.olm.operatorframework.io spec: @@ -959,9 +946,9 @@ spec: properties: config: description: |- - config is an optional field used to specify bundle specific configuration - used to configure the bundle. Configuration is bundle specific and a bundle may provide - a configuration schema. When not specified, the default configuration of the resolved bundle will be used. + config is optional and specifies bundle-specific configuration. + Configuration is bundle-specific and a bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide a configuration schema the bundle is deemed to not be configurable. More information on how @@ -969,21 +956,19 @@ spec: properties: configType: description: |- - configType is a required reference to the type of configuration source. + configType is required and specifies the type of configuration source. - Allowed values are "Inline" + The only allowed value is "Inline". - When this field is set to "Inline", the cluster extension configuration is defined inline within the - ClusterExtension resource. + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. enum: - Inline type: string inline: description: |- - inline contains JSON or YAML values specified directly in the - ClusterExtension. + inline contains JSON or YAML values specified directly in the ClusterExtension. - inline is used to specify arbitrary configuration values for the ClusterExtension. + It is used to specify arbitrary configuration values for the ClusterExtension. It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. The configuration values are validated at runtime against a JSON schema provided by the bundle. 
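A sketch of the inline configuration just described; the property under inline is hypothetical and would be validated against whatever JSON schema the resolved bundle provides:

spec:
  config:
    configType: Inline
    inline:
      logLevel: debug    # hypothetical bundle-defined option; at least one property is required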
minProperties: 1 @@ -999,37 +984,35 @@ spec: : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -1051,16 +1034,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. 
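The preflight settings described above nest as in this sketch; skipping the check trades safety for flexibility, per the data-loss caveat in the description:

spec:
  install:
    preflight:
      crdUpgradeSafety:
        enforcement: None   # skips the CRD Upgrade Safety check; Strict is the default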
+ It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -1072,24 +1054,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -1118,11 +1098,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -1133,30 +1113,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. 
It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -1186,13 +1165,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -1219,12 +1197,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -1272,35 +1247,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. 
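Pulling together the catalog source fields covered in this hunk, a constrained install might look like the following sketch; the package name, channel, and label are hypothetical:

spec:
  source:
    sourceType: Catalog
    catalog:
      packageName: example-package
      channels:
        - stable                               # ANDed with the version constraint below
      version: ">=1.0.0, <1.5.0"               # only bundles in stable that satisfy this range are installable
      selector:
        matchLabels:
          example.com/owner: platform          # hypothetical label to filter candidate ClusterCatalogs
      upgradeConstraintPolicy: CatalogProvided # the default; SelfCertified ignores catalog upgrade edges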
+ upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -1378,13 +1352,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -1490,11 +1463,13 @@ spec: x-kubernetes-list-type: map conditions: description: |- + conditions represents the current state of the ClusterExtension. + The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. 
- When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. @@ -1503,12 +1478,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -1573,17 +1548,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -1593,8 +1567,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. 
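The Installed condition and the resolved bundle identity described above might appear in status as in this sketch; the install nesting, bundle name, and version are assumptions for the example:

status:
  install:                      # nesting assumed for illustration
    bundle:
      name: example-package.v1.2.3
      version: 1.2.3
  conditions:
  - type: Installed
    status: "True"
    reason: Succeeded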
+ version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver @@ -2159,6 +2133,7 @@ spec: image: busybox:1.36 name: tar securityContext: + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: @@ -2370,7 +2345,6 @@ spec: - --metrics-bind-address=:8443 - --pprof-bind-address=:6060 - --leader-elect - - --feature-gates=SingleOwnNamespaceInstallSupport=true - --feature-gates=PreflightPermissions=true - --feature-gates=HelmChartSupport=true - --feature-gates=BoxcutterRuntime=true diff --git a/manifests/experimental.yaml b/manifests/experimental.yaml index 7bff36748..324a0fe4c 100644 --- a/manifests/experimental.yaml +++ b/manifests/experimental.yaml @@ -150,7 +150,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clustercatalogs.olm.operatorframework.io spec: @@ -176,7 +176,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -198,29 +198,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. 
+ Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. enum: - Unavailable - Available @@ -228,35 +223,33 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. format: int32 + maximum: 2147483647 + minimum: -2147483648 type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -267,25 +260,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. 
+ pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -368,12 +359,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -391,31 +381,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. 
- The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -476,11 +465,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -489,14 +476,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. 
+ The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -530,12 +517,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -554,19 +540,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: @@ -593,7 +576,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clusterextensionrevisions.olm.operatorframework.io spec: @@ -868,7 +851,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clusterextensions.olm.operatorframework.io spec: @@ -924,9 +907,9 @@ spec: properties: config: description: |- - config is an optional field used to specify bundle specific configuration - used to configure the bundle. Configuration is bundle specific and a bundle may provide - a configuration schema. When not specified, the default configuration of the resolved bundle will be used. + config is optional and specifies bundle-specific configuration. + Configuration is bundle-specific and a bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide a configuration schema the bundle is deemed to not be configurable. More information on how @@ -934,21 +917,19 @@ spec: properties: configType: description: |- - configType is a required reference to the type of configuration source. + configType is required and specifies the type of configuration source. - Allowed values are "Inline" + The only allowed value is "Inline". - When this field is set to "Inline", the cluster extension configuration is defined inline within the - ClusterExtension resource. 
+ When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. enum: - Inline type: string inline: description: |- - inline contains JSON or YAML values specified directly in the - ClusterExtension. + inline contains JSON or YAML values specified directly in the ClusterExtension. - inline is used to specify arbitrary configuration values for the ClusterExtension. + It is used to specify arbitrary configuration values for the ClusterExtension. It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. The configuration values are validated at runtime against a JSON schema provided by the bundle. minProperties: 1 @@ -964,37 +945,35 @@ spec: : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -1016,16 +995,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. 
+ This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -1037,24 +1015,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -1083,11 +1059,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -1098,30 +1074,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. 
+ It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -1151,13 +1126,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -1184,12 +1158,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. 
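The channel-and-version example above, written out as a catalog source sketch (the package and channel names are hypothetical):

  source:
    sourceType: Catalog
    catalog:
      packageName: example-package
      channels:
        - foo
      version: ">=1.0.0, <1.5.0"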
- When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -1237,35 +1208,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -1343,13 +1313,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. 
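A sketch combining the selector and upgradeConstraintPolicy fields above (the label key and value are hypothetical; matchLabels is the standard Kubernetes label selector shorthand alongside matchExpressions):

  catalog:
    packageName: example-package
    selector:
      matchLabels:
        example.com/environment: production
    upgradeConstraintPolicy: SelfCertified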
When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -1455,11 +1424,13 @@ spec: x-kubernetes-list-type: map conditions: description: |- + conditions represents the current state of the ClusterExtension. + The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. @@ -1468,12 +1439,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -1538,17 +1509,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. 
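A sketch of how the conditions and bundle attributes described above might appear in the status of a healthy ClusterExtension (the bundle name and version are hypothetical):

  status:
    conditions:
    - type: Installed
      status: "True"
      reason: Succeeded
    - type: Progressing
      status: "True"
      reason: Succeeded
    install:
      bundle:
        name: example-package.v1.2.3
        version: 1.2.3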
+ It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -1558,8 +1528,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver @@ -2281,7 +2251,6 @@ spec: - --health-probe-bind-address=:8081 - --metrics-bind-address=:8443 - --leader-elect - - --feature-gates=SingleOwnNamespaceInstallSupport=true - --feature-gates=PreflightPermissions=true - --feature-gates=HelmChartSupport=true - --feature-gates=BoxcutterRuntime=true diff --git a/manifests/standard-e2e.yaml b/manifests/standard-e2e.yaml index 1aed38ba9..51c3b412c 100644 --- a/manifests/standard-e2e.yaml +++ b/manifests/standard-e2e.yaml @@ -152,6 +152,10 @@ data: [[registry]] prefix = "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" + + [[registry]] + prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" + location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000" kind: ConfigMap metadata: annotations: @@ -185,7 +189,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: standard name: clustercatalogs.olm.operatorframework.io spec: @@ -211,7 +215,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -233,29 +237,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. 
- Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Clients should treat the ClusterCatalog the same as if it does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog on the cluster without deleting it. enum: - Unavailable - Available @@ -263,35 +262,33 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. format: int32 + maximum: 2147483647 + minimum: -2147483648 type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
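A sketch of the availabilityMode and priority fields above, for a catalog that is kept on the cluster but taken out of service (the values are arbitrary):

  availabilityMode: Unavailable
  priority: 1000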
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -302,25 +299,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -403,12 +398,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -426,31 +420,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. 
- When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -511,11 +504,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". 
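A sketch of how the simultaneous Serving/Progressing scenario described above might appear in a ClusterCatalog status:

  conditions:
  - type: Serving
    status: "True"
    reason: Available
  - type: Progressing
    status: "True"
    reason: Retrying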
+ lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -524,14 +515,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -565,12 +556,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -589,19 +579,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: @@ -628,7 +615,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: standard name: clusterextensions.olm.operatorframework.io spec: @@ -682,39 +669,75 @@ spec: description: spec is an optional field that defines the desired state of the ClusterExtension. properties: + config: + description: |- + config is optional and specifies bundle-specific configuration. + A bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. + + config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide + a configuration schema, the bundle is deemed not to be configurable.
More information on how + to configure bundles can be found in the OLM documentation associated with your current OLM version. + properties: + configType: + description: |- + configType is required and specifies the type of configuration source. + + The only allowed value is "Inline". + + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. + enum: + - Inline + type: string + inline: + description: |- + inline contains JSON or YAML values specified directly in the ClusterExtension. + + It is used to specify arbitrary configuration values for the ClusterExtension. + It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. + The configuration values are validated at runtime against a JSON schema provided by the bundle. + minProperties: 1 + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - configType + type: object + x-kubernetes-validations: + - message: inline is required when configType is Inline, and forbidden + otherwise + rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline) + : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. 
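A sketch of the enforcement field above: skipping the CRD Upgrade Safety check for a ClusterExtension (use with caution, per the warning above):

  install:
    preflight:
      crdUpgradeSafety:
        enforcement: None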
enum: - None - Strict @@ -736,16 +759,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -757,24 +779,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -803,11 +823,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. 
+ Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -818,30 +838,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -871,13 +890,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. 
Some examples of valid values are: - some-package @@ -904,12 +922,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -957,35 +972,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -1063,13 +1077,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. 
- Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -1093,23 +1106,25 @@ spec: properties: conditions: description: |- + conditions represents the current state of the ClusterExtension. + The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -1174,17 +1189,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. 
+ A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -1194,8 +1208,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver @@ -1760,6 +1774,7 @@ spec: image: busybox:1.36 name: tar securityContext: + readOnlyRootFilesystem: true allowPrivilegeEscalation: false capabilities: drop: diff --git a/manifests/standard.yaml b/manifests/standard.yaml index 34cc57918..16c489f76 100644 --- a/manifests/standard.yaml +++ b/manifests/standard.yaml @@ -150,7 +150,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: standard name: clustercatalogs.olm.operatorframework.io spec: @@ -176,7 +176,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -198,29 +198,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. 
+ When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. enum: - Unavailable - Available @@ -228,35 +223,33 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. format: int32 + maximum: 2147483647 + minimum: -2147483648 type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. 
- source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -267,25 +260,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -368,12 +359,11 @@ spec: : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -391,31 +381,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. 
- When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -476,11 +465,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". 
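A sketch of the lastUnpacked field above together with the resolvedSource field described next (the timestamp, image path, and digest are placeholders):

  lastUnpacked: "2025-01-01T00:00:00Z"
  resolvedSource:
    type: Image
    image:
      ref: quay.io/example/catalog@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa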
format: date-time type: string resolvedSource: @@ -489,14 +476,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -530,12 +517,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -554,19 +540,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: @@ -593,7 +576,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: standard name: clusterextensions.olm.operatorframework.io spec: @@ -647,39 +630,75 @@ spec: description: spec is an optional field that defines the desired state of the ClusterExtension. properties: + config: + description: |- + config is optional and specifies bundle-specific configuration. + Configuration is bundle-specific and a bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. + + config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide + a configuration schema, the bundle is deemed not to be configurable. More information on how + to configure bundles can be found in the OLM documentation associated with your current OLM version. + properties: + configType: + description: |- + configType is required and specifies the type of configuration source. + + The only allowed value is "Inline".
+ + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. + enum: + - Inline + type: string + inline: + description: |- + inline contains JSON or YAML values specified directly in the ClusterExtension. + + It is used to specify arbitrary configuration values for the ClusterExtension. + It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. + The configuration values are validated at runtime against a JSON schema provided by the bundle. + minProperties: 1 + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - configType + type: object + x-kubernetes-validations: + - message: inline is required when configType is Inline, and forbidden + otherwise + rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline) + : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -701,16 +720,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. 
- It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -722,24 +740,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -768,11 +784,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. 
Below is a minimal example of a source definition (in yaml): @@ -783,30 +799,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -836,13 +851,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. 
Some examples of valid values are: - some-package @@ -869,12 +883,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector @@ -922,35 +933,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -1028,13 +1038,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. 
- Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -1058,23 +1067,25 @@ spec: properties: conditions: description: |- + conditions represents the current state of the ClusterExtension. + The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. @@ -1139,17 +1150,16 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. 
+ A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. @@ -1159,8 +1169,8 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/openshift/Makefile b/openshift/Makefile index a6a0ca06a..060ab10c4 100644 --- a/openshift/Makefile +++ b/openshift/Makefile @@ -33,20 +33,20 @@ verify-manifests: manifests E2E_REGISTRY_NAME=docker-registry E2E_REGISTRY_NAMESPACE=operator-controller-e2e -export LOCAL_REGISTRY_HOST := $(E2E_REGISTRY_NAME).$(E2E_REGISTRY_NAMESPACE).svc:5000 export CLUSTER_REGISTRY_HOST := $(E2E_REGISTRY_NAME).$(E2E_REGISTRY_NAMESPACE).svc:5000 export REG_PKG_NAME := registry-operator export E2E_TEST_CATALOG_V1 := e2e/test-catalog:v1 export E2E_TEST_CATALOG_V2 := e2e/test-catalog:v2 -export CATALOG_IMG := $(LOCAL_REGISTRY_HOST)/$(E2E_TEST_CATALOG_V1) +export CATALOG_IMG := $(CLUSTER_REGISTRY_HOST)/$(E2E_TEST_CATALOG_V1) # Order matters here, the ".../registries.conf" entry must be last. -export DOWNSTREAM_E2E_FLAGS := -count=1 -v -skip 'TestClusterExtensionInstallReResolvesWhenNewCatalog|TestClusterExtensionInstallRegistryDynamic|TestClusterExtensionInstallRegistry/package_requires_mirror_registry_configuration_in_/etc/containers/registries.conf' +export DOWNSTREAM_E2E_FLAGS := -count=1 -v .PHONY: test-e2e test-e2e: ## Run the e2e tests. 
$(DIR)/operator-controller/build-test-registry.sh $(E2E_REGISTRY_NAMESPACE) $(E2E_REGISTRY_NAME) $(E2E_REGISTRY_IMAGE) cd $(DIR)/../; \ - go test $(DOWNSTREAM_E2E_FLAGS) ./test/e2e/...; + LOCAL_REGISTRY_HOST=$$(oc get route $(E2E_REGISTRY_NAME) -n $(E2E_REGISTRY_NAMESPACE) -o jsonpath='{.spec.host}') \ + go test $(DOWNSTREAM_E2E_FLAGS) ./test/e2e/features_test.go --godog.tags="~@mirrored-registry && ~@catalog-updates" --k8s.cli=oc export DOWNSTREAM_EXPERIMENTAL_E2E_FLAGS := -count=1 -v .PHONY: test-experimental-e2e diff --git a/openshift/catalogd/manifests-experimental.yaml b/openshift/catalogd/manifests-experimental.yaml index 4dacdee86..26856e3c1 100644 --- a/openshift/catalogd/manifests-experimental.yaml +++ b/openshift/catalogd/manifests-experimental.yaml @@ -95,7 +95,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clustercatalogs.olm.operatorframework.io spec: @@ -121,7 +121,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -143,29 +143,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. - Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. 
enum: - Unavailable - Available @@ -173,35 +168,33 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. format: int32 + maximum: 2147483647 + minimum: -2147483648 type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -212,25 +205,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. 
+ You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -290,12 +281,11 @@ spec: rule: 'self.ref.find(''(@.*:)'') != "" ? !has(self.pollIntervalMinutes) : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -311,31 +301,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. - When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. 
- When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -395,11 +384,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". + lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -407,14 +394,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. 
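As a sketch of the digest-based format described above, a resolved reference might be reported as follows. The registry, repository, and digest are placeholders, not real values:

  status:
    resolvedSource:
      type: Image
      image:
        ref: registry.example.com/catalogs/my-catalog@sha256:<digest>  # placeholder digest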
maxLength: 1000 type: string x-kubernetes-validations: @@ -435,12 +422,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -456,19 +442,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. maxLength: 525 type: string x-kubernetes-validations: diff --git a/openshift/catalogd/manifests.yaml b/openshift/catalogd/manifests.yaml index 68b6c87f3..04a50980f 100644 --- a/openshift/catalogd/manifests.yaml +++ b/openshift/catalogd/manifests.yaml @@ -95,7 +95,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: standard name: clustercatalogs.olm.operatorframework.io spec: @@ -121,7 +121,7 @@ spec: schema: openAPIV3Schema: description: |- - ClusterCatalog enables users to make File-Based Catalog (FBC) catalog data available to the cluster. + ClusterCatalog makes File-Based Catalog (FBC) data available to your cluster. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs properties: apiVersion: @@ -143,29 +143,24 @@ spec: type: object spec: description: |- - spec is the desired state of the ClusterCatalog. - spec is required. - The controller will work to ensure that the desired - catalog is unpacked and served over the catalog content HTTP server. + spec is a required field that defines the desired state of the ClusterCatalog. + The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. properties: availabilityMode: default: Available description: |- - availabilityMode allows users to define how the ClusterCatalog is made available to clients on the cluster. - availabilityMode is optional. + availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster. - Allowed values are "Available" and "Unavailable" and omitted. + Allowed values are "Available", "Unavailable", or omitted. When omitted, the default value is "Available". - When set to "Available", the catalog contents will be unpacked and served over the catalog content HTTP server. 
- Setting the availabilityMode to "Available" tells clients that they should consider this ClusterCatalog - and its contents as usable. + When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server. + Clients should consider this ClusterCatalog and its contents as usable. - When set to "Unavailable", the catalog contents will no longer be served over the catalog content HTTP server. - When set to this availabilityMode it should be interpreted the same as the ClusterCatalog not existing. - Setting the availabilityMode to "Unavailable" can be useful in scenarios where a user may not want - to delete the ClusterCatalog all together, but would still like it to be treated as if it doesn't exist. + When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server. + Treat this the same as if the ClusterCatalog does not exist. + Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. enum: - Unavailable - Available @@ -173,35 +168,33 @@ spec: priority: default: 0 description: |- - priority allows the user to define a priority for a ClusterCatalog. - priority is optional. + priority is an optional field that defines a priority for this ClusterCatalog. - A ClusterCatalog's priority is used by clients as a tie-breaker between ClusterCatalogs that meet the client's requirements. - A higher number means higher priority. + Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements. + Higher numbers mean higher priority. - It is up to clients to decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. - When deciding how to break the tie in this scenario, it is recommended that clients prompt their users for additional input. + Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements. + Clients should prompt users for additional input to break the tie. - When omitted, the default priority is 0 because that is the zero value of integers. + When omitted, the default priority is 0. - Negative numbers can be used to specify a priority lower than the default. - Positive numbers can be used to specify a priority higher than the default. + Use negative numbers to specify a priority lower than the default. + Use positive numbers to specify a priority higher than the default. The lowest possible value is -2147483648. The highest possible value is 2147483647. format: int32 + maximum: 2147483647 + minimum: -2147483648 type: integer source: description: |- - source allows a user to define the source of a catalog. - A "catalog" contains information on content that can be installed on a cluster. - Providing a catalog source makes the contents of the catalog discoverable and usable by - other on-cluster components. - These on-cluster components may do a variety of things with this information, such as - presenting the content in a GUI dashboard or installing content from the catalog on the cluster. + source is a required field that defines the source of a catalog. + A catalog contains information on content that can be installed on a cluster. + The catalog source makes catalog contents discoverable and usable by other on-cluster components. + These components can present the content in a GUI dashboard or install content from the catalog on the cluster. 
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format. For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs. - source is a required field. Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image: @@ -212,25 +205,23 @@ spec: properties: image: description: |- - image is used to configure how catalog contents are sourced from an OCI image. - This field is required when type is Image, and forbidden otherwise. + image configures how catalog contents are sourced from an OCI image. + It is required when type is Image, and forbidden otherwise. properties: pollIntervalMinutes: description: |- - pollIntervalMinutes allows the user to set the interval, in minutes, at which the image source should be polled for new content. - pollIntervalMinutes is optional. - pollIntervalMinutes can not be specified when ref is a digest-based reference. + pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content. + You cannot specify pollIntervalMinutes when ref is a digest-based reference. - When omitted, the image will not be polled for new content. + When omitted, the image is not polled for new content. minimum: 1 type: integer ref: description: |- - ref allows users to define the reference to a container image containing Catalog contents. - ref is required. - ref can not be more than 1000 characters. + ref is a required field that defines the reference to a container image containing catalog contents. + It cannot be more than 1000 characters. - A reference can be broken down into 3 parts - the domain, name, and identifier. + A reference has 3 parts: the domain, name, and identifier. The domain is typically the registry where an image is located. It must be alphanumeric characters (lowercase and uppercase) separated by the "." character. @@ -290,12 +281,11 @@ spec: rule: 'self.ref.find(''(@.*:)'') != "" ? !has(self.pollIntervalMinutes) : true' type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", the ClusterCatalog content will be sourced from an OCI image. + When set to "Image", the ClusterCatalog content is sourced from an OCI image. When using an image source, the image field must be set and must be the only field defined for this type. enum: - Image @@ -311,31 +301,30 @@ spec: type: object status: description: |- - status contains information about the state of the ClusterCatalog such as: - - Whether or not the catalog contents are being served via the catalog content HTTP server - - Whether or not the ClusterCatalog is progressing to a new state + status contains the following information about the state of the ClusterCatalog: + - Whether the catalog contents are being served via the catalog content HTTP server + - Whether the ClusterCatalog is progressing to a new state - A reference to the source from which the catalog contents were retrieved properties: conditions: description: |- - conditions is a representation of the current state for this ClusterCatalog. + conditions represents the current state of this ClusterCatalog. The current condition types are Serving and Progressing. - The Serving condition is used to represent whether or not the contents of the catalog is being served via the HTTP(S) web server. 
- When it has a status of True and a reason of Available, the contents of the catalog are being served. - When it has a status of False and a reason of Unavailable, the contents of the catalog are not being served because the contents are not yet available. - When it has a status of False and a reason of UserSpecifiedUnavailable, the contents of the catalog are not being served because the catalog has been intentionally marked as unavailable. + The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server: + - When status is True and reason is Available, the catalog contents are being served. + - When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available. + - When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable. - The Progressing condition is used to represent whether or not the ClusterCatalog is progressing or is ready to progress towards a new state. - When it has a status of True and a reason of Retrying, there was an error in the progression of the ClusterCatalog that may be resolved on subsequent reconciliation attempts. - When it has a status of True and a reason of Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. - When it has a status of False and a reason of Blocked, there was an error in the progression of the ClusterCatalog that requires manual intervention for recovery. + The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state: + - When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts. + - When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing. + - When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery. - In the case that the Serving condition is True with reason Available and Progressing is True with reason Retrying, the previously fetched - catalog contents are still being served via the HTTP(S) web server while we are progressing towards serving a new version of the catalog - contents. This could occur when we've initially fetched the latest contents from the source for this catalog and when polling for changes - to the contents we identify that there are updates to the contents. + If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously: + - The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server. + - The Progressing condition is True with reason Retrying because the system is working to serve the new version. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -395,11 +384,9 @@ spec: x-kubernetes-list-type: map lastUnpacked: description: |- - lastUnpacked represents the last time the contents of the - catalog were extracted from their source format. As an example, - when using an Image source, the OCI image will be pulled and the - image layers written to a file-system backed cache. We refer to the - act of this extraction from the source format as "unpacking". 
+ lastUnpacked represents the last time the catalog contents were extracted from their source format. + For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache. + This extraction from the source format is called "unpacking". format: date-time type: string resolvedSource: @@ -407,14 +394,14 @@ spec: properties: image: description: |- - image is a field containing resolution information for a catalog sourced from an image. - This field must be set when type is Image, and forbidden otherwise. + image contains resolution information for a catalog sourced from an image. + It must be set when type is Image, and forbidden otherwise. properties: ref: description: |- ref contains the resolved image digest-based reference. - The digest format is used so users can use other tooling to fetch the exact - OCI manifests that were used to extract the catalog contents. + The digest format allows you to use other tooling to fetch the exact OCI manifests + that were used to extract the catalog contents. maxLength: 1000 type: string x-kubernetes-validations: @@ -435,12 +422,11 @@ spec: type: object type: description: |- - type is a reference to the type of source the catalog is sourced from. - type is required. + type is a required field that specifies the type of source for the catalog. The only allowed value is "Image". - When set to "Image", information about the resolved image source will be set in the 'image' field. + When set to "Image", information about the resolved image source is set in the image field. enum: - Image type: string @@ -456,19 +442,16 @@ spec: properties: base: description: |- - base is a cluster-internal URL that provides endpoints for - accessing the content of the catalog. + base is a cluster-internal URL that provides endpoints for accessing the catalog content. - It is expected that clients append the path for the endpoint they wish - to access. + Clients should append the path for the endpoint they want to access. - Currently, only a single endpoint is served and is accessible at the path - /api/v1. + Currently, only a single endpoint is served and is accessible at the path /api/v1. The endpoints served for the v1 API are: - - /all - this endpoint returns the entirety of the catalog contents in the FBC format + - /all - this endpoint returns the entire catalog contents in the FBC format - As the needs of users and clients of the evolve, new endpoints may be added. + New endpoints may be added as needs evolve. 
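For example, assuming a catalog named my-catalog served by a hypothetical in-cluster service, the populated field and the path a client would append might look like this sketch (the host and path are placeholders, not the controller's actual values):

  status:
    urls:
      base: https://catalogd-service.olmv1-system.svc/catalogs/my-catalog  # placeholder host and path
  # a client fetching the full FBC stream would request <base>/api/v1/all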
maxLength: 525 type: string x-kubernetes-validations: diff --git a/openshift/operator-controller/build-test-registry.sh b/openshift/operator-controller/build-test-registry.sh index 2df347ee3..2d8d5c229 100755 --- a/openshift/operator-controller/build-test-registry.sh +++ b/openshift/operator-controller/build-test-registry.sh @@ -81,7 +81,7 @@ spec: - name: http port: 5000 targetPort: 5000 - type: NodePort + type: ClusterIP EOF oc wait --for=condition=Available -n "${namespace}" "deploy/${name}" --timeout=60s @@ -118,3 +118,7 @@ spec: EOF oc wait --for=condition=Complete -n "${namespace}" "job/${name}-push" --timeout=60s + +oc create route passthrough "${name}" --service "${name}" -n "${namespace}" + +oc get route "${name}" -n "${namespace}" -o yaml diff --git a/openshift/operator-controller/manifests-experimental.yaml b/openshift/operator-controller/manifests-experimental.yaml index 7ccb483b7..d785abfd0 100644 --- a/openshift/operator-controller/manifests-experimental.yaml +++ b/openshift/operator-controller/manifests-experimental.yaml @@ -91,7 +91,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: experimental name: clusterextensions.olm.operatorframework.io spec: @@ -146,9 +146,9 @@ spec: properties: config: description: |- - config is an optional field used to specify bundle specific configuration - used to configure the bundle. Configuration is bundle specific and a bundle may provide - a configuration schema. When not specified, the default configuration of the resolved bundle will be used. + config is optional and specifies bundle-specific configuration. + Configuration is bundle-specific and a bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide a configuration schema the bundle is deemed to not be configurable. More information on how to configure bundles can be found in the OLM documentation associated with your current OLM version. properties: configType: description: |- - configType is a required reference to the type of configuration source. + configType is required and specifies the type of configuration source. - Allowed values are "Inline" + The only allowed value is "Inline". - When this field is set to "Inline", the cluster extension configuration is defined inline within the - ClusterExtension resource. + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. enum: - Inline type: string inline: description: |- - inline contains JSON or YAML values specified directly in the - ClusterExtension. + inline contains JSON or YAML values specified directly in the ClusterExtension. - inline is used to specify arbitrary configuration values for the ClusterExtension. + It is used to specify arbitrary configuration values for the ClusterExtension. It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. The configuration values are validated at runtime against a JSON schema provided by the bundle.
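Tying the config fields described above together, a ClusterExtension using inline configuration might look like the sketch below, assuming the olm.operatorframework.io/v1 API. The package name, namespace, service account, and every key under inline are hypothetical; valid inline keys depend entirely on the schema shipped by the resolved bundle.

  apiVersion: olm.operatorframework.io/v1
  kind: ClusterExtension
  metadata:
    name: my-extension
  spec:
    namespace: my-extension
    serviceAccount:
      name: my-extension-installer
    source:
      sourceType: Catalog
      catalog:
        packageName: my-package
    config:
      configType: Inline
      inline:
        # hypothetical bundle-defined settings, validated at runtime
        # against the JSON schema provided by the resolved bundle
        logLevel: debug
        replicas: 2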
minProperties: 1 @@ -184,37 +182,35 @@ spec: rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline) : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. + preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -234,16 +230,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. 
It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -255,24 +250,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -298,11 +291,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -313,30 +306,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. 
- A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -366,13 +358,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -396,12 +387,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
@@ -447,35 +435,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -553,13 +540,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -660,11 +646,13 @@ spec: x-kubernetes-list-type: map conditions: description: |- + conditions represents the current state of the ClusterExtension. 
+ The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. @@ -673,12 +661,12 @@ spec: When Progressing is True and Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active roll out. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -741,25 +729,24 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. 
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/openshift/operator-controller/manifests.yaml b/openshift/operator-controller/manifests.yaml index 091dfe26a..529c0f648 100644 --- a/openshift/operator-controller/manifests.yaml +++ b/openshift/operator-controller/manifests.yaml @@ -91,7 +91,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 olm.operatorframework.io/generator: standard name: clusterextensions.olm.operatorframework.io spec: @@ -144,39 +144,73 @@ spec: spec: description: spec is an optional field that defines the desired state of the ClusterExtension. properties: + config: + description: |- + config is optional and specifies bundle-specific configuration. + Configuration is bundle-specific and a bundle may provide a configuration schema. + When not specified, the default configuration of the resolved bundle is used. + + config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide + a configuration schema the bundle is deemed to not be configurable. More information on how + to configure bundles can be found in the OLM documentation associated with your current OLM version. + properties: + configType: + description: |- + configType is required and specifies the type of configuration source. + + The only allowed value is "Inline". + + When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. + enum: + - Inline + type: string + inline: + description: |- + inline contains JSON or YAML values specified directly in the ClusterExtension. + + It is used to specify arbitrary configuration values for the ClusterExtension. + It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property. + The configuration values are validated at runtime against a JSON schema provided by the bundle. + minProperties: 1 + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - configType + type: object + x-kubernetes-validations: + - message: inline is required when configType is Inline, and forbidden otherwise + rule: 'has(self.configType) && self.configType == ''Inline'' ?has(self.inline) : !has(self.inline)' install: description: |- - install is an optional field used to configure the installation options - for the ClusterExtension such as the pre-flight check configuration. + install is optional and configures installation options for the ClusterExtension, + such as the pre-flight check configuration. properties: preflight: description: |- - preflight is an optional field that can be used to configure the checks that are - run before installation or upgrade of the content for the package specified in the packageName field. 
+ preflight is optional and configures the checks that run before installation or upgrade + of the content for the package specified in the packageName field. When specified, it replaces the default preflight configuration for install/upgrade actions. - When not specified, the default configuration will be used. + When not specified, the default configuration is used. properties: crdUpgradeSafety: description: |- - crdUpgradeSafety is used to configure the CRD Upgrade Safety pre-flight - checks that run prior to upgrades of installed content. + crdUpgradeSafety configures the CRD Upgrade Safety pre-flight checks that run + before upgrades of installed content. - The CRD Upgrade Safety pre-flight check safeguards from unintended - consequences of upgrading a CRD, such as data loss. + The CRD Upgrade Safety pre-flight check safeguards from unintended consequences of upgrading a CRD, + such as data loss. properties: enforcement: description: |- - enforcement is a required field, used to configure the state of the CRD Upgrade Safety pre-flight check. + enforcement is required and configures the state of the CRD Upgrade Safety pre-flight check. Allowed values are "None" or "Strict". The default value is "Strict". - When set to "None", the CRD Upgrade Safety pre-flight check will be skipped - when performing an upgrade operation. This should be used with caution as - unintended consequences such as data loss can occur. + When set to "None", the CRD Upgrade Safety pre-flight check is skipped during an upgrade operation. + Use this option with caution as unintended consequences such as data loss can occur. - When set to "Strict", the CRD Upgrade Safety pre-flight check will be run when - performing an upgrade operation. + When set to "Strict", the CRD Upgrade Safety pre-flight check runs during an upgrade operation. enum: - None - Strict @@ -196,16 +230,15 @@ spec: rule: has(self.preflight) namespace: description: |- - namespace is a reference to a Kubernetes namespace. - This is the namespace in which the provided ServiceAccount must exist. - It also designates the default namespace where namespace-scoped resources - for the extension are applied to the cluster. + namespace specifies a Kubernetes namespace. + This is the namespace where the provided ServiceAccount must exist. + It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster. Some extensions may contain namespace-scoped resources to be applied in other namespaces. This namespace must exist. - namespace is required, immutable, and follows the DNS label standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters or hyphens (-), - start and end with an alphanumeric character, and be no longer than 63 characters + The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character, + and be no longer than 63 characters. [RFC 1123]: https://tools.ietf.org/html/rfc1123 maxLength: 63 @@ -217,24 +250,22 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") serviceAccount: description: |- - serviceAccount is a reference to a ServiceAccount used to perform all interactions - with the cluster that are required to manage the extension. + serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster + that are required to manage the extension. 
The ServiceAccount must be configured with the necessary permissions to perform these interactions. The ServiceAccount must exist in the namespace referenced in the spec. - serviceAccount is required. + The serviceAccount field is required. properties: name: description: |- - name is a required, immutable reference to the name of the ServiceAccount - to be used for installation and management of the content for the package - specified in the packageName field. + name is a required, immutable reference to the name of the ServiceAccount used for installation + and management of the content for the package specified in the packageName field. This ServiceAccount must exist in the installNamespace. - name follows the DNS subdomain standard as defined in [RFC 1123]. - It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + The name field follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-serviceaccount @@ -260,11 +291,11 @@ spec: type: object source: description: |- - source is a required field which selects the installation source of content - for this ClusterExtension. Selection is performed by setting the sourceType. + source is required and selects the installation source of content for this ClusterExtension. + Set the sourceType field to perform the selection. - Catalog is currently the only implemented sourceType, and setting the - sourcetype to "Catalog" requires the catalog field to also be defined. + Catalog is currently the only implemented sourceType. + Setting sourceType to "Catalog" requires the catalog field to also be defined. Below is a minimal example of a source definition (in yaml): @@ -275,30 +306,29 @@ spec: properties: catalog: description: |- - catalog is used to configure how information is sourced from a catalog. - This field is required when sourceType is "Catalog", and forbidden otherwise. + catalog configures how information is sourced from a catalog. + It is required when sourceType is "Catalog", and forbidden otherwise. properties: channels: description: |- - channels is an optional reference to a set of channels belonging to - the package specified in the packageName field. + channels is optional and specifies a set of channels belonging to the package + specified in the packageName field. - A "channel" is a package-author-defined stream of updates for an extension. + A channel is a package-author-defined stream of updates for an extension. - Each channel in the list must follow the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. No more than 256 channels can be specified. + Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. + You can specify no more than 256 channels. - When specified, it is used to constrain the set of installable bundles and - the automated upgrade path. This constraint is an AND operation with the - version field. 
For example: + When specified, it constrains the set of installable bundles and the automated upgrade path. + This constraint is an AND operation with the version field. For example: - Given channel is set to "foo" - Given version is set to ">=1.0.0, <1.5.0" - - Only bundles that exist in channel "foo" AND satisfy the version range comparison will be considered installable - - Automatic upgrades will be constrained to upgrade edges defined by the selected channel + - Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable + - Automatic upgrades are constrained to upgrade edges defined by the selected channel - When unspecified, upgrade edges across all channels will be used to identify valid automatic upgrade paths. + When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths. Some examples of valid values are: - 1.1.x @@ -328,13 +358,12 @@ spec: type: array packageName: description: |- - packageName is a reference to the name of the package to be installed - and is used to filter the content from catalogs. + packageName specifies the name of the package to be installed and is used to filter + the content from catalogs. - packageName is required, immutable, and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. Some examples of valid values are: - some-package @@ -358,12 +387,9 @@ spec: rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") selector: description: |- - selector is an optional field that can be used - to filter the set of ClusterCatalogs used in the bundle - selection process. + selector is optional and filters the set of ClusterCatalogs used in the bundle selection process. - When unspecified, all ClusterCatalogs will be used in - the bundle selection process. + When unspecified, all ClusterCatalogs are used in the bundle selection process. properties: matchExpressions: description: matchExpressions is a list of label selector requirements. The requirements are ANDed. @@ -409,35 +435,34 @@ spec: upgradeConstraintPolicy: default: CatalogProvided description: |- - upgradeConstraintPolicy is an optional field that controls whether - the upgrade path(s) defined in the catalog are enforced for the package - referenced in the packageName field. + upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog + are enforced for the package referenced in the packageName field. - Allowed values are: "CatalogProvided" or "SelfCertified", or omitted. + Allowed values are "CatalogProvided", "SelfCertified", or omitted. - When this field is set to "CatalogProvided", automatic upgrades will only occur - when upgrade constraints specified by the package author are met. + When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package + author are met. - When this field is set to "SelfCertified", the upgrade constraints specified by - the package author are ignored. This allows for upgrades and downgrades to - any version of the package. 
This is considered a dangerous operation as it - can lead to unknown and potentially disastrous outcomes, such as data - loss. It is assumed that users have independently verified changes when - using this option. + When set to "SelfCertified", the upgrade constraints specified by the package author are ignored. + This allows upgrades and downgrades to any version of the package. + This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes, + such as data loss. + Use this option only if you have independently verified the changes. - When this field is omitted, the default value is "CatalogProvided". + When omitted, the default value is "CatalogProvided". enum: - CatalogProvided - SelfCertified type: string version: description: |- - version is an optional semver constraint (a specific version or range of versions). When unspecified, the latest version available will be installed. + version is an optional semver constraint (a specific version or range of versions). + When unspecified, the latest version available is installed. Acceptable version ranges are no longer than 64 characters. - Version ranges are composed of comma- or space-delimited values and one or - more comparison operators, known as comparison strings. Additional - comparison strings can be added using the OR operator (||). + Version ranges are composed of comma- or space-delimited values and one or more comparison operators, + known as comparison strings. + You can add additional comparison strings using the OR operator (||). # Range Comparisons @@ -515,13 +540,12 @@ spec: type: object sourceType: description: |- - sourceType is a required reference to the type of install source. + sourceType is required and specifies the type of install source. - Allowed values are "Catalog" + The only allowed value is "Catalog". - When this field is set to "Catalog", information for determining the - appropriate bundle of content to install will be fetched from - ClusterCatalog resources existing on the cluster. + When set to "Catalog", information for determining the appropriate bundle of content to install + is fetched from ClusterCatalog resources on the cluster. When using the Catalog sourceType, the catalog field must also be set. enum: - Catalog @@ -542,23 +566,25 @@ spec: properties: conditions: description: |- + conditions represents the current state of the ClusterExtension. + The set of condition types which apply to all spec.source variations are Installed and Progressing. - The Installed condition represents whether or not the bundle has been installed for this ClusterExtension. - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. - When Installed is False and the Reason is Failed, the bundle has failed to install. + The Installed condition represents whether the bundle has been installed for this ClusterExtension: + - When Installed is True and the Reason is Succeeded, the bundle has been successfully installed. + - When Installed is False and the Reason is Failed, the bundle has failed to install. The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state. When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state. When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts. 
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery. - When the ClusterExtension is sourced from a catalog, if may also communicate a deprecation condition. - These are indications from a package owner to guide users away from a particular package, channel, or bundle. - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. - PackageDeprecated is set if the requested package is marked deprecated in the catalog. - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. + When the ClusterExtension is sourced from a catalog, it may also communicate a deprecation condition. + These are indications from a package owner to guide users away from a particular package, channel, or bundle: + - BundleDeprecated is set if the requested bundle version is marked deprecated in the catalog. + - ChannelDeprecated is set if the requested channel is marked deprecated in the catalog. + - PackageDeprecated is set if the requested package is marked deprecated in the catalog. + - Deprecated is a rollup condition that is present when any of the deprecated conditions are present. items: description: Condition contains details for one aspect of the current state of this API Resource. properties: @@ -621,25 +647,24 @@ spec: properties: bundle: description: |- - bundle is a required field which represents the identifying attributes of a bundle. + bundle is required and represents the identifying attributes of a bundle. - A "bundle" is a versioned set of content that represents the resources that - need to be applied to a cluster to install a package. + A "bundle" is a versioned set of content that represents the resources that need to be applied + to a cluster to install a package. properties: name: description: |- - name is required and follows the DNS subdomain standard - as defined in [RFC 1123]. It must contain only lowercase alphanumeric characters, - hyphens (-) or periods (.), start and end with an alphanumeric character, - and be no longer than 253 characters. + name is required and follows the DNS subdomain standard as defined in [RFC 1123]. + It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), + start and end with an alphanumeric character, and be no longer than 253 characters. type: string x-kubernetes-validations: - message: packageName must be a valid DNS1123 subdomain. It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.), start and end with an alphanumeric character, and be no longer than 253 characters rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") version: description: |- - version is a required field and is a reference to the version that this bundle represents - version follows the semantic versioning standard as defined in https://semver.org/. + version is required and references the version that this bundle represents. + It follows the semantic versioning standard as defined in https://semver.org/. 
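The resolved identity described here surfaces under `status.install.bundle`; a quick sketch of reading it back, assuming an extension named `my-extension`:

```bash
# Print the installed bundle's name and semver version, tab-separated.
kubectl get clusterextension my-extension \
  -o jsonpath='{.status.install.bundle.name}{"\t"}{.status.install.bundle.version}{"\n"}'
```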
type: string x-kubernetes-validations: - message: version must be well-formed semver diff --git a/requirements.txt b/requirements.txt index 35b0e3fc6..b94adfcfb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ Babel==2.17.0 -beautifulsoup4==4.14.2 +beautifulsoup4==4.14.3 certifi==2025.11.12 charset-normalizer==3.4.4 click==8.3.1 @@ -14,14 +14,14 @@ markdown2==2.5.4 MarkupSafe==3.0.3 mergedeep==1.3.4 mkdocs==1.6.1 -mkdocs-material==9.7.0 +mkdocs-material==9.7.1 mkdocs-material-extensions==1.3.1 packaging==25.0 paginate==0.5.7 pathspec==0.12.1 -platformdirs==4.5.0 +platformdirs==4.5.1 Pygments==2.19.2 -pymdown-extensions==10.17.2 +pymdown-extensions==10.19.1 pyquery==2.0.1 python-dateutil==2.9.0.post0 PyYAML==6.0.3 @@ -30,6 +30,6 @@ readtime==3.0.0 regex==2025.11.3 requests==2.32.5 six==1.17.0 -soupsieve==2.8 -urllib3==2.6.0 +soupsieve==2.8.1 +urllib3==2.6.3 watchdog==6.0.0 diff --git a/test/e2e/README.md b/test/e2e/README.md new file mode 100644 index 000000000..c3483e518 --- /dev/null +++ b/test/e2e/README.md @@ -0,0 +1,350 @@ +# E2E Tests - Godog Framework + +This directory contains end-to-end (e2e) tests, written using the [Godog](https://github.com/cucumber/godog) framework. + +## Overview + +### What is Godog/BDD/Cucumber? + +Godog is a Behavior-Driven Development (BDD) framework that allows you to write tests in a human-readable format called +[Gherkin](https://cucumber.io/docs/gherkin/reference/). Tests are written as scenarios using Given-When-Then syntax, making them accessible to both technical and +non-technical stakeholders. + +**Benefits:** + +- **Readable**: Tests serve as living documentation +- **Maintainable**: Reusable step definitions reduce code duplication +- **Collaborative**: Product owners and developers share the same test specifications +- **Structured**: Clear separation between test scenarios and implementation + +## Project Structure + +``` +test/e2e/ +├── README.md # This file +├── features_test.go # Test runner and suite initialization +├── features/ # Gherkin feature files +│ ├── install.feature # ClusterExtension installation scenarios +│ ├── update.feature # ClusterExtension update scenarios +│ ├── recover.feature # Recovery scenarios +│ ├── status.feature # ClusterExtension status scenarios +│ └── metrics.feature # Metrics endpoint scenarios +└── steps/ # Step definitions and test utilities + ├── steps.go # Step definition implementations + ├── hooks.go # Test hooks and scenario context + └── testdata/ # Test data (RBAC templates, catalogs) + ├── rbac-template.yaml + ├── cluster-admin-rbac-template.yaml + ├── metrics-reader-rbac-template.yaml + ├── test-catalog-template.yaml + ├── extra-catalog-template.yaml + └── ... +``` + +## Architecture + +### 1. Test Runner (`features_test.go`) + +The main test entry point that configures and runs the Godog test suite. + +### 2. Feature Files (`features/*.feature`) + +Gherkin files that describe test scenarios in natural language. 
+ +**Structure:** + +```gherkin +Feature: [Feature Name] + [Feature description] + + Background: + [Common setup steps for all scenarios] + + Scenario: [Scenario Name] + Given [precondition] + When [action] + Then [expected result] + And [additional assertions] +``` + +**Example:** + +```gherkin +Feature: Install ClusterExtension + + Background: + Given OLM is available + And "test" catalog serves bundles + And Service account "olm-sa" with needed permissions is available in test namespace + + Scenario: Install latest available version from the default channel + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + ... + """ + Then ClusterExtension is rolled out + And ClusterExtension is available +``` + +### 3. Step Definitions (`steps/steps.go`) + +Go functions that implement the steps defined in feature files. Each step is registered with a regex pattern that +matches the Gherkin text. + +**Registration:** + +```go +func RegisterSteps(sc *godog.ScenarioContext) { +sc.Step(`^OLM is available$`, OLMisAvailable) +sc.Step(`^bundle "([^"]+)" is installed in version "([^"]+)"$`, BundleInstalled) +sc.Step(`^ClusterExtension is applied$`, ResourceIsApplied) +// ... more steps +} +``` + +**Step Implementation Pattern:** + +```go +func BundleInstalled(ctx context.Context, name, version string) error { + sc := scenarioCtx(ctx) + waitFor(ctx, func() bool { + v, err := kubectl("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.install.bundle}") + if err != nil { + return false + } + var bundle map[string]interface{} + json.Unmarshal([]byte(v), &bundle) + return bundle["name"] == name && bundle["version"] == version + }) + return nil +} +``` + +### 4. Hooks and Context (`steps/hooks.go`) + +Manages test lifecycle and scenario-specific context. + +**Hooks:** + +- `CheckFeatureTags`: Skips scenarios based on feature gate tags (e.g., `@WebhookProviderCertManager`) +- `CreateScenarioContext`: Creates unique namespace and names for each scenario +- `ScenarioCleanup`: Cleans up resources after each scenario + +**Variable Substitution:** + +Replaces `${TEST_NAMESPACE}`, `${NAME}`, and `${CATALOG_IMG}` with scenario-specific values. + +## Writing Tests + +### 1. Create a Feature File + +Create a new `.feature` file in `test/e2e/features/`: + +```gherkin +Feature: Your Feature Name + Description of what this feature tests + + Background: + Given OLM is available + And "test" catalog serves bundles + + Scenario: Your scenario description + When [some action] + Then [expected outcome] +``` + +### 2. Implement Step Definitions + +Add step implementations in `steps/steps.go`: + +```go +func RegisterSteps(sc *godog.ScenarioContext) { + // ... existing steps + sc.Step(`^your step pattern "([^"]+)"$`, YourStepFunction) +} + +func YourStepFunction(ctx context.Context, param string) error { + sc := scenarioCtx(ctx) + // Implementation + return nil +} +``` + +### 3. Use Existing Steps + +Leverage existing steps for common operations: + +- **Setup**: `Given OLM is available`, `And "test" catalog serves bundles` +- **Resource Management**: `When ClusterExtension is applied`, `And resource is applied` +- **Assertions**: `Then ClusterExtension is available`, `And bundle "..." 
is installed` +- **Conditions**: `Then ClusterExtension reports Progressing as True with Reason Retrying:` + +### 4. Variable Substitution + +Use these variables in YAML templates: + +- `${NAME}`: Scenario-specific ClusterExtension name (e.g., `ce-123`) +- `${TEST_NAMESPACE}`: Scenario-specific namespace (e.g., `ns-123`) +- `${CATALOG_IMG}`: Catalog image reference (defaults to in-cluster registry, overridable via `CATALOG_IMG` env var) + +### 5. Feature Tags + +Use tags to conditionally run scenarios based on feature gates: + +```gherkin +@WebhookProviderCertManager +Scenario: Install operator having webhooks +``` + +Scenarios are skipped if the feature gate is not enabled on the deployed controller. + +## Running Tests + +### Run All Tests + +```bash +make test-e2e +``` + +or + +```bash +make test-experimental-e2e +``` + + +### Run Specific Feature + +```bash +go test test/e2e/features_test.go -- features/install.feature +``` + +### Run Specific Scenario by Tag + +```bash +go test test/e2e/features_test.go --godog.tags="@WebhookProviderCertManager" +``` + +### Run with Debug Logging + +```bash +go test -v test/e2e/features_test.go --log.debug +``` + +### CLI Options + +Godog options can be passed after `--`: + +```bash +go test test/e2e/features_test.go \ + --godog.format=pretty \ + --godog.tags="@WebhookProviderCertManager" +``` + +Available formats: `pretty`, `cucumber`, `progress`, `junit` + +**Custom Flags:** + +- `--log.debug`: Enable debug logging (development mode) +- `--k8s.cli=`: Specify path to Kubernetes CLI (default: `kubectl`) + - Useful for using `oc` or a specific kubectl binary + +**Example:** + +```bash +go test test/e2e/features_test.go --log.debug --k8s.cli=oc +``` + +### Environment Variables + +- `KUBECONFIG`: Path to kubeconfig file (defaults to `~/.kube/config`) +- `E2E_SUMMARY_OUTPUT`: Path to write test summary (optional) +- `CATALOG_IMG`: Override default catalog image reference (optional) +- `LOCAL_REGISTRY_HOST`: Local registry host for catalog images + +## Design Patterns + +### 1. Scenario Isolation + +Each scenario runs in its own namespace with unique resource names, ensuring complete isolation: + +- Namespace: `ns-{scenario-id}` +- ClusterExtension: `ce-{scenario-id}` + +### 2. Automatic Cleanup + +The `ScenarioCleanup` hook ensures all resources are deleted after each scenario: + +- Kills background processes (e.g., kubectl port-forward) +- Deletes ClusterExtensions +- Deletes namespaces +- Deletes added resources + +### 3. Declarative Resource Management + +Resources are managed declaratively using YAML templates embedded in feature files as docstrings: + +```gherkin +When ClusterExtension is applied +""" + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + ... + """ +``` + +### 4. Polling with Timeouts + +All asynchronous operations use `waitFor` with consistent timeout (300s) and tick (1s): + +```go +waitFor(ctx, func() bool { + // Check condition + return conditionMet +}) +``` + +### 5. Feature Gate Detection + +Tests automatically detect enabled feature gates from the running controller and skip scenarios that require disabled +features. + +## Common Step Patterns + +A list of available, implemented steps can be obtained by running: + +```shell +go test test/e2e/features_test.go -d +``` + +## Best Practices + +1. **Keep scenarios focused**: Each scenario should test one specific behavior +2. **Use Background wisely**: Common setup steps belong in Background +3. 
**Reuse steps**: Leverage existing step definitions before creating new ones +4. **Meaningful names**: Scenario names should clearly describe what is being tested +5. **Avoid implementation details**: Focus on behavior, not implementation + +## References + +- [Godog Documentation](https://github.com/cucumber/godog) +- [Gherkin Reference](https://cucumber.io/docs/gherkin/reference/) +- [Cucumber Best Practices](https://cucumber.io/docs/guides/10-minute-tutorial/) diff --git a/test/e2e/cluster_extension_install_test.go b/test/e2e/cluster_extension_install_test.go deleted file mode 100644 index b1994d7e0..000000000 --- a/test/e2e/cluster_extension_install_test.go +++ /dev/null @@ -1,798 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "slices" - "testing" - "time" - - "github.com/google/go-containerregistry/pkg/crane" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/utils/ptr" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" - . "github.com/operator-framework/operator-controller/test/helpers" -) - -const ( - artifactName = "operator-controller-e2e" - pollDuration = time.Minute - pollInterval = time.Second - testCatalogRefEnvVar = "CATALOG_IMG" - testCatalogName = "test-catalog" -) - -func TestClusterExtensionInstallRegistry(t *testing.T) { - type testCase struct { - name string - packageName string - } - for _, tc := range []testCase{ - { - name: "no registry configuration necessary", - packageName: "test", - }, - { - // NOTE: This test requires an extra configuration in /etc/containers/registries.conf, which is mounted - // for this e2e via the ./config/components/e2e/registries-conf kustomize component as part of the e2e component. - // The goal here is to prove that "mirrored-registry.operator-controller-e2e.svc.cluster.local:5000" is - // mapped to the "real" registry hostname ("docker-registry.operator-controller-e2e.svc.cluster.local:5000"). 
- name: "package requires mirror registry configuration in /etc/containers/registries.conf", - packageName: "test-mirrored", - }, - } { - t.Run(tc.name, func(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: tc.packageName, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting progressing as True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By eventually creating the NetworkPolicy named 'test-operator-network-policy'") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - var np networkingv1.NetworkPolicy - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: "test-operator-network-policy", Namespace: ns.Name}, &np)) - }, pollDuration, pollInterval) - - t.Log("By verifying that no templating occurs for registry+v1 bundle manifests") - cm := corev1.ConfigMap{} - require.NoError(t, c.Get(context.Background(), types.NamespacedName{Namespace: ns.Name, Name: "test-configmap"}, &cm)) - require.Contains(t, cm.Annotations, "shouldNotTemplate") - require.Contains(t, cm.Annotations["shouldNotTemplate"], "{{ $labels.namespace }}") - }) - } -} - -func TestClusterExtensionInstallRegistryDynamic(t *testing.T) { - // NOTE: Like 'TestClusterExtensionInstallRegistry', this test also requires extra configuration in /etc/containers/registries.conf - packageName := "dynamic" - - t.Log("When a cluster extension is installed from a catalog") - 
t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: packageName, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It updates the registries.conf file contents") - cm := corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "e2e-registries-conf", - Namespace: "olmv1-system", - }, - Data: map[string]string{ - "registries.conf": `[[registry]] -prefix = "dynamic-registry.operator-controller-e2e.svc.cluster.local:5000" -location = "docker-registry.operator-controller-e2e.svc.cluster.local:5000"`, - }, - } - require.NoError(t, c.Update(context.Background(), &cm)) - - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, 2*time.Minute, pollInterval) - - // Give the check 2 minutes instead of the typical 1 for the pod's - // files to update from the configmap change. - // The theoretical max time is the kubelet sync period of 1 minute + - // ConfigMap cache TTL of 1 minute = 2 minutes - t.Log("By eventually reporting progressing as True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, 2*time.Minute, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallRegistryMultipleBundles(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - extraCatalogName := fmt.Sprintf("extra-test-catalog-%s", rand.String(8)) - extraCatalog, err := CreateTestCatalog(context.Background(), extraCatalogName, os.Getenv(testCatalogRefEnvVar)) - require.NoError(t, err) - - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - defer func(cat 
*ocv1.ClusterCatalog) { - require.NoError(t, c.Delete(context.Background(), cat)) - require.Eventually(t, func() bool { - err := c.Get(context.Background(), types.NamespacedName{Name: cat.Name}, &ocv1.ClusterCatalog{}) - return errors.IsNotFound(err) - }, pollDuration, pollInterval) - }(extraCatalog) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves to multiple bundle paths") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a failed resolution with multiple bundles") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True and Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - // Catalog names are sorted alphabetically in the error message - catalogs := []string{extensionCatalog.Name, extraCatalog.Name} - slices.Sort(catalogs) - expectedMessage := fmt.Sprintf("in multiple catalogs with the same priority %v", catalogs) - require.Contains(ct, cond.Message, expectedMessage) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionBlockInstallNonSuccessorVersion(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.0", - // No Selector since this is an exact version match - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful installation") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - require.Equal(ct, - &ocv1.ClusterExtensionInstallStatus{Bundle: ocv1.BundleMetadata{ - Name: "test-operator.1.0.0", - Version: "1.0.0", - }}, - clusterExtension.Status.Install, - ) - - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It does not allow to upgrade the ClusterExtension to a non-successor version") - t.Log("By updating the ClusterExtension resource to a 
non-successor version") - // 1.2.0 does not replace/skip/skipRange 1.0.0. - clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting an unsatisfiable resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True and Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Equal(ct, "error upgrading from currently installed version \"1.0.0\": no bundles found for package \"test\" matching version \"1.2.0\"", cond.Message) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionForceInstallNonSuccessorVersion(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.0", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It allows to upgrade the ClusterExtension to a non-successor version") - t.Log("By updating the ClusterExtension resource to a non-successor version") - // 1.2.0 does not replace/skip/skipRange 1.0.0. 
- clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting a satisfiable resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallSuccessorVersion(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.0", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It does allow to upgrade the ClusterExtension to any of the successor versions within non-zero major version") - t.Log("By updating the ClusterExtension resource by skipping versions") - // 1.0.1 replaces 1.0.0 in the test catalog - clusterExtension.Spec.Source.Catalog.Version = "1.0.1" - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallReResolvesWhenCatalogIsPatched(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("It resolves again when a catalog is patched with new ImageRef") - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchExpressions: 
[]metav1.LabelSelectorRequirement{ - { - Key: "olm.operatorframework.io/metadata.name", - Operator: metav1.LabelSelectorOpIn, - Values: []string{extensionCatalog.Name}, - }, - }, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - // patch imageRef tag on test-catalog image with v2 image - t.Log("By patching the catalog ImageRef to point to the v2 catalog") - updatedCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:v2", os.Getenv("CLUSTER_REGISTRY_HOST")) - err := patchTestCatalog(context.Background(), extensionCatalog.Name, updatedCatalogImage) - require.NoError(t, err) - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.Contains(ct, clusterExtension.Status.Install.Bundle.Version, "1.3.0") - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallReResolvesWhenNewCatalog(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("It resolves again when a new catalog is available") - - // Tag the image with the new tag - var err error - v1Image := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), os.Getenv("E2E_TEST_CATALOG_V1")) - err = crane.Tag(v1Image, latestImageTag, crane.Insecure) - require.NoError(t, err) - - // create a test-catalog with latest image tag - catalogName := fmt.Sprintf("test-catalog-%s", rand.String(8)) - latestCatalogImage := fmt.Sprintf("%s/e2e/test-catalog:latest", os.Getenv("CLUSTER_REGISTRY_HOST")) - extensionCatalog, err := CreateTestCatalog(context.Background(), catalogName, latestCatalogImage) - require.NoError(t, err) - clusterExtensionName := fmt.Sprintf("clusterextension-%s", rand.String(8)) - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterExtensionName, - }, - } - ns, err := CreateNamespace(context.Background(), clusterExtensionName) - require.NoError(t, err) - sa, err := 
CreateServiceAccount(context.Background(), types.NamespacedName{Name: clusterExtensionName, Namespace: ns.Name}, clusterExtensionName) - require.NoError(t, err) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - // update tag on test-catalog image with v2 image - t.Log("By updating the catalog tag to point to the v2 catalog") - v2Image := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), os.Getenv("E2E_TEST_CATALOG_V2")) - err = crane.Tag(v2Image, latestImageTag, crane.Insecure) - require.NoError(t, err) - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.Name}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionInstallReResolvesWhenManagedContentChanged(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("It resolves again when managed content is changed") - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It installs the specified package with correct bundle path") - 
t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By reporting a successful installation") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - }, pollDuration, pollInterval) - - t.Log("By deleting a managed resource") - testConfigMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-configmap", - Namespace: clusterExtension.Spec.Namespace, - }, - } - require.NoError(t, c.Delete(context.Background(), testConfigMap)) - - t.Log("By eventually re-creating the managed resource") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: testConfigMap.Name, Namespace: testConfigMap.Namespace}, testConfigMap)) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionRecoversFromNoNamespaceWhenFailureFixed(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - t.Log("By not creating the Namespace and ServiceAccount") - clusterExtension, extensionCatalog := TestInitClusterExtensionClusterCatalog(t) - - defer TestCleanup(t, extensionCatalog, clusterExtension, nil, nil) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: clusterExtension.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: clusterExtension.Name, - }, - } - - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting Progressing == True with Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Installed != True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.NotEqual(ct, metav1.ConditionTrue, cond.Status) - }, pollDuration, pollInterval) - - t.Log("By creating the Namespace and ServiceAccount") - sa, ns := TestInitServiceAccountNamespace(t, clusterExtension.Name) - defer TestCleanup(t, nil, nil, sa, ns) - - // NOTE: In order to ensure predictable results we need to 
ensure we have a single - // known failure with a singular fix operation. Additionally, due to the exponential - // backoff of this eventually check we MUST ensure we do not touch the ClusterExtension - // after creating int the Namespace and ServiceAccount. - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True with Reason Success") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionRecoversFromExistingDeploymentWhenFailureFixed(t *testing.T) { - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: clusterExtension.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: clusterExtension.Name, - }, - } - - t.Log("By creating a new Deployment that can not be adopted") - newDeployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-operator", - Namespace: clusterExtension.Name, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: ptr.To(int32(1)), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "test-operator"}, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"app": "test-operator"}, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Command: []string{"sleep", "1000"}, - Image: "busybox", - ImagePullPolicy: corev1.PullAlways, - Name: "busybox", - SecurityContext: &corev1.SecurityContext{ - RunAsNonRoot: ptr.To(true), - RunAsUser: ptr.To(int64(1000)), - AllowPrivilegeEscalation: ptr.To(false), - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{ - "ALL", - }, - }, - SeccompProfile: &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - }, - }, - }, - }, - }, - }, - }, - } - require.NoError(t, c.Create(context.Background(), newDeployment)) - - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By 
eventually reporting Progressing == True with Reason Retrying") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually failing to install the package successfully due to no adoption support") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - // TODO: We probably _should_ be testing the reason here, but helm and boxcutter applier have different reasons. - // Maybe we change helm to use "Absent" rather than "Failed" since the Progressing condition already captures - // the failure? - //require.Equal(ct, ocv1.ReasonFailed, cond.Reason) - require.Contains(ct, cond.Message, "No bundle installed") - }, pollDuration, pollInterval) - - t.Log("By deleting the new Deployment") - require.NoError(t, c.Delete(context.Background(), newDeployment)) - - // NOTE: In order to ensure predictable results we need to ensure we have a single - // known failure with a singular fix operation. Additionally, due to the exponential - // backoff of this eventually check we MUST ensure we do not touch the ClusterExtension - // after deleting the Deployment. - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting Progressing == True with Reason Success") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) -} diff --git a/test/e2e/cluster_extension_revision_test.go b/test/e2e/cluster_extension_revision_test.go deleted file mode 100644 index 5c21e66ab..000000000 --- a/test/e2e/cluster_extension_revision_test.go +++ /dev/null @@ -1,261 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "slices" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/remotecommand" - ctrl 
"sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - "github.com/operator-framework/operator-controller/internal/operator-controller/features" - . "github.com/operator-framework/operator-controller/internal/shared/util/test" - . "github.com/operator-framework/operator-controller/test/helpers" -) - -func TestClusterExtensionRevision(t *testing.T) { - SkipIfFeatureGateDisabled(t, string(features.BoxcutterRuntime)) - t.Log("When a cluster extension is installed from a catalog") - t.Log("When the extension bundle format is registry+v1") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer CollectTestArtifacts(t, artifactName, c, cfg) - - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "test", - Version: "1.0.1", - // we would also like to force upgrade to 1.0.2, which is not within the upgrade path - UpgradeConstraintPolicy: ocv1.UpgradeConstraintPolicySelfCertified, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - t.Log("It resolves the specified package with correct bundle path") - t.Log("By creating the ClusterExtension resource") - require.NoError(t, c.Create(context.Background(), clusterExtension)) - - t.Log("By eventually reporting a successful resolution and bundle path") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By revision-1 eventually reporting Progressing:True:Succeeded and Available:True:ProbesSucceeded conditions") - var clusterExtensionRevision ocv1.ClusterExtensionRevision - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting progressing as True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually installing the package successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), 
types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - require.Len(ct, clusterExtension.Status.ActiveRevisions, 1) - require.Equal(ct, clusterExtension.Status.ActiveRevisions[0].Name, clusterExtensionRevision.Name) - require.Empty(ct, clusterExtension.Status.ActiveRevisions[0].Conditions) - }, pollDuration, pollInterval) - - t.Log("Check Deployment Availability Probe") - t.Log("By making the operator pod not ready") - podName := getPodName(t, clusterExtension.Spec.Namespace, client.MatchingLabels{"app": "olme2etest"}) - podExec(t, clusterExtension.Spec.Namespace, podName, []string{"rm", "/var/www/ready"}) - - t.Log("By revision-1 eventually reporting Progressing:True:Succeeded and Available:False:ProbeFailure conditions") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbeFailure, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By propagating Available:False to ClusterExtension") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - }, pollDuration, pollInterval) - - t.Log("By making the operator pod ready") - podName = getPodName(t, clusterExtension.Spec.Namespace, client.MatchingLabels{"app": "olme2etest"}) - podExec(t, clusterExtension.Spec.Namespace, podName, []string{"touch", "/var/www/ready"}) - - t.Log("By revision-1 eventually reporting Progressing:True:Succeeded and Available:True:ProbesSucceeded conditions") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By 
propagating Available:True to ClusterExtension") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - }, pollDuration, pollInterval) - - t.Log("Check archiving") - t.Log("By upgrading the cluster extension to v1.2.0") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - require.NoError(t, c.Update(context.Background(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By revision-2 eventually reporting Progressing:True:Succeeded and Available:True:ProbesSucceeded conditions") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-2", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbesSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting progressing, available, and installed as True") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By revision-1 eventually reporting Progressing:False:Archived and Available:Unknown:Archived conditions") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-1", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonArchived, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionUnknown, cond.Status) - require.Equal(ct, 
ocv1.ClusterExtensionRevisionReasonArchived, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By upgrading the cluster extension to v1.0.2 containing bad image reference") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - clusterExtension.Spec.Source.Catalog.Version = "1.0.2" - require.NoError(t, c.Update(context.Background(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By revision-3 eventually reporting Progressing:True:Succeeded and Available:False:ProbeFailure conditions") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: fmt.Sprintf("%s-3", clusterExtension.Name)}, &clusterExtensionRevision)) - cond := apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - - cond = apimeta.FindStatusCondition(clusterExtensionRevision.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionFalse, cond.Status) - require.Equal(ct, ocv1.ClusterExtensionRevisionReasonProbeFailure, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By eventually reporting more than one active revision") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - require.Len(ct, clusterExtension.Status.ActiveRevisions, 2) - require.Equal(ct, clusterExtension.Status.ActiveRevisions[0].Name, fmt.Sprintf("%s-2", clusterExtension.Name)) - require.Equal(ct, clusterExtension.Status.ActiveRevisions[1].Name, fmt.Sprintf("%s-3", clusterExtension.Name)) - require.Empty(ct, clusterExtension.Status.ActiveRevisions[0].Conditions) - require.NotEmpty(ct, clusterExtension.Status.ActiveRevisions[1].Conditions) - }, pollDuration, pollInterval) -} - -func getPodName(t *testing.T, podNamespace string, matchingLabels client.MatchingLabels) string { - var podList corev1.PodList - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.List(context.Background(), &podList, client.InNamespace(podNamespace), matchingLabels)) - podList.Items = slices.DeleteFunc(podList.Items, func(pod corev1.Pod) bool { - // Ignore terminating pods - return pod.DeletionTimestamp != nil - }) - require.Len(ct, podList.Items, 1) - }, pollDuration, pollInterval) - return podList.Items[0].Name -} - -func podExec(t *testing.T, podNamespace string, podName string, cmd []string) { - req := cs.CoreV1().RESTClient().Post().Resource("pods").Name(podName).Namespace(podNamespace).SubResource("exec") - req.VersionedParams(&corev1.PodExecOptions{ - Command: cmd, - Stdout: true, - }, scheme.ParameterCodec) - exec, err := remotecommand.NewSPDYExecutor(ctrl.GetConfigOrDie(), "POST", req.URL()) - require.NoError(t, err) - err = exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{Stdout: os.Stdout}) - require.NoError(t, err) -} diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go deleted file mode 100644 index 847f5c753..000000000 --- a/test/e2e/e2e_suite_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "testing" - - apiextensionsv1 
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - "github.com/operator-framework/operator-controller/internal/operator-controller/scheme" - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" -) - -var ( - cfg *rest.Config - c client.Client - cs *kubernetes.Clientset -) - -const ( - testSummaryOutputEnvVar = "E2E_SUMMARY_OUTPUT" - latestImageTag = "latest" -) - -func TestMain(m *testing.M) { - cfg = ctrl.GetConfigOrDie() - - var err error - utilruntime.Must(apiextensionsv1.AddToScheme(scheme.Scheme)) - c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - utilruntime.Must(err) - - cs, err = kubernetes.NewForConfig(cfg) - utilruntime.Must(err) - - res := m.Run() - path := os.Getenv(testSummaryOutputEnvVar) - if path == "" { - fmt.Printf("Note: E2E_SUMMARY_OUTPUT is unset; skipping summary generation") - } else { - err = testutil.PrintSummary(path) - if err != nil { - // Fail the run if alerts are found - fmt.Printf("%v", err) - os.Exit(1) - } - } - os.Exit(res) -} - -// patchTestCatalog will patch the existing clusterCatalog on the test cluster, provided -// the context, catalog name, and the image reference. It returns an error -// if any errors occurred while updating the catalog. -func patchTestCatalog(ctx context.Context, name string, newImageRef string) error { - // Fetch the existing ClusterCatalog - catalog := &ocv1.ClusterCatalog{} - err := c.Get(ctx, client.ObjectKey{Name: name}, catalog) - if err != nil { - return err - } - - // Update the ImageRef - catalog.Spec.Source.Image.Ref = newImageRef - - // Patch the ClusterCatalog - err = c.Update(ctx, catalog) - if err != nil { - return err - } - - return err -} diff --git a/test/e2e/features/install.feature b/test/e2e/features/install.feature new file mode 100644 index 000000000..ba59ffe7d --- /dev/null +++ b/test/e2e/features/install.feature @@ -0,0 +1,299 @@ +Feature: Install ClusterExtension + + As an OLM user I would like to install a cluster extension from catalog + or get an appropriate information in case of an error. 
+ + Background: + Given OLM is available + And ClusterCatalog "test" serves bundles + And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + + Scenario: Install latest available version + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + And resource "networkpolicy/test-operator-network-policy" is installed + And resource "configmap/test-configmap" is installed + And resource "deployment/test-operator" is installed + + @mirrored-registry + Scenario Outline: Install latest available version from mirrored registry + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: <package-name> + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And bundle "<package-name>-operator.1.2.0" is installed in version "1.2.0" + And resource "networkpolicy/test-operator-network-policy" is installed + And resource "configmap/test-configmap" is installed + And resource "deployment/test-operator" is installed + + Examples: + | package-name | + | test-mirrored | + | dynamic | + + + Scenario: Report that bundle cannot be installed when it exists in multiple catalogs with the same priority + Given ClusterCatalog "extra" serves bundles + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + """ + Then ClusterExtension reports Progressing as True with Reason Retrying and Message: + """ + found bundles for package "test" in multiple catalogs with the same priority [extra-catalog test-catalog] + """ + + @SingleOwnNamespaceInstallSupport + Scenario: watchNamespace config is required for extension supporting single namespace + Given ServiceAccount "olm-admin" in test namespace is cluster admin + And resource is applied + """ + apiVersion: v1 + kind: Namespace + metadata: + name: single-namespace-operator-target + """ + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + source: + sourceType: Catalog + catalog: + packageName: single-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying and Message: + """ + error for resolved bundle "single-namespace-operator.1.0.0" with version "1.0.0": + invalid ClusterExtension configuration: invalid configuration: required field "watchNamespace" is missing + """ + When ClusterExtension is updated to set config.watchNamespace field + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} +
spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + config: + configType: Inline + inline: + watchNamespace: single-namespace-operator-target # added + source: + sourceType: Catalog + catalog: + packageName: single-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension reports Installed as True + And bundle "single-namespace-operator.1.0.0" is installed in version "1.0.0" + And operator "single-namespace-operator" target namespace is "single-namespace-operator-target" + + @SingleOwnNamespaceInstallSupport + Scenario: watchNamespace config is required for extension supporting own namespace + Given ServiceAccount "olm-admin" in test namespace is cluster admin + And ClusterExtension is applied without the watchNamespace configuration + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + source: + sourceType: Catalog + catalog: + packageName: own-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying and Message: + """ + error for resolved bundle "own-namespace-operator.1.0.0" with version + "1.0.0": invalid ClusterExtension configuration: invalid configuration: required + field "watchNamespace" is missing + """ + And ClusterExtension is updated to include the watchNamespace configuration + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + config: + configType: Inline + inline: + watchNamespace: some-ns # added, but not own namespace + source: + sourceType: Catalog + catalog: + packageName: own-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying and Message: + """ + error for resolved bundle "own-namespace-operator.1.0.0" with version + "1.0.0": invalid ClusterExtension configuration: invalid configuration: 'some-ns' + is not valid ownNamespaceInstallMode: invalid value "some-ns": watchNamespace + must be "${TEST_NAMESPACE}" (the namespace where the operator is installed) because this + operator only supports OwnNamespace install mode + """ + When ClusterExtension is updated to set watchNamespace to own namespace value + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + config: + configType: Inline + inline: + watchNamespace: ${TEST_NAMESPACE} # own namespace + source: + sourceType: Catalog + catalog: + packageName: own-namespace-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And operator "own-namespace-operator" target namespace is "${TEST_NAMESPACE}" + + @WebhookProviderCertManager + Scenario: Install operator having webhooks + Given ServiceAccount "olm-admin" in test namespace is cluster admin + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-admin + source: + sourceType: Catalog + catalog: + 
packageName: webhook-operator + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And resource apply fails with error msg containing "Invalid value: false: Spec.Valid must be true" + """ + apiVersion: webhook.operators.coreos.io/v1 + kind: WebhookTest + metadata: + name: ${NAME} + namespace: ${TEST_NAMESPACE} + spec: + valid: false # webhook rejects it as invalid value + """ + And resource is applied + """ + apiVersion: webhook.operators.coreos.io/v1 + kind: WebhookTest + metadata: + name: ${NAME} + namespace: ${TEST_NAMESPACE} + spec: + valid: true + """ + And resource "webhooktest/${NAME}" matches + """ + apiVersion: webhook.operators.coreos.io/v2 + kind: WebhookTest + metadata: + name: ${NAME} + namespace: ${TEST_NAMESPACE} + spec: + conversion: + valid: true + mutate: true + """ + And resource "webhooktest.v1.webhook.operators.coreos.io/${NAME}" matches + """ + apiVersion: webhook.operators.coreos.io/v1 + kind: WebhookTest + metadata: + name: ${NAME} + namespace: ${TEST_NAMESPACE} + spec: + valid: true + mutate: true + """ diff --git a/test/e2e/features/metrics.feature b/test/e2e/features/metrics.feature new file mode 100644 index 000000000..ccb719198 --- /dev/null +++ b/test/e2e/features/metrics.feature @@ -0,0 +1,15 @@ +Feature: Expose various metrics + + Background: + Given OLM is available + + Scenario Outline: component exposes metrics + Given ServiceAccount "metrics-reader" in test namespace has permissions to fetch "<component>" metrics + When ServiceAccount "metrics-reader" sends request to "/metrics" endpoint of "<component>" service + Then Prometheus metrics are returned in the response + + Examples: + | component | + | operator-controller | + | catalogd | + \ No newline at end of file diff --git a/test/e2e/features/recover.feature b/test/e2e/features/recover.feature new file mode 100644 index 000000000..0438f2d1a --- /dev/null +++ b/test/e2e/features/recover.feature @@ -0,0 +1,117 @@ +Feature: Recover cluster extension from errors that might occur during its lifetime + + Background: + Given OLM is available + And ClusterCatalog "test" serves bundles + + + Scenario: Restore removed resource + Given ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension is available + And resource "configmap/test-configmap" exists + When resource "configmap/test-configmap" is removed + Then resource "configmap/test-configmap" is eventually restored + + Scenario: Install ClusterExtension after target namespace becomes available + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying + When ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + Then ClusterExtension is available + And ClusterExtension
reports Progressing as True with Reason Succeeded + + Scenario: Install ClusterExtension after conflicting resource is removed + Given ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + And resource is applied + """ + apiVersion: apps/v1 + kind: Deployment + metadata: + name: test-operator + namespace: ${TEST_NAMESPACE} + spec: + replicas: 1 + selector: + matchLabels: + app: test-operator + template: + metadata: + labels: + app: test-operator + spec: + containers: + - command: + - "sleep" + args: + - "1000" + image: busybox:1.36 + imagePullPolicy: IfNotPresent + name: busybox + securityContext: + runAsNonRoot: true + runAsUser: 1000 + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + """ + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying + And ClusterExtension reports Installed as False + When resource "deployment/test-operator" is removed + Then ClusterExtension is available + And ClusterExtension reports Progressing as True with Reason Succeeded + And ClusterExtension reports Installed as True diff --git a/test/e2e/features/status.feature b/test/e2e/features/status.feature new file mode 100644 index 000000000..5c8a3141d --- /dev/null +++ b/test/e2e/features/status.feature @@ -0,0 +1,45 @@ +Feature: Report status of the managed ClusterExtension workload + + As an OLM user, I would like the ClusterExtension to report availability + changes of its managed workload.
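+ # Assumption: the @BoxcutterRuntime tag below limits these scenarios to runs with the + # BoxcutterRuntime feature gate enabled, mirroring the SkipIfFeatureGateDisabled guard in + # the deleted cluster_extension_revision_test.go; tag selection is presumably handled by + # the godog runner.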
+ + Background: + Given OLM is available + And ClusterCatalog "test" serves bundles + And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + + @BoxcutterRuntime + Scenario: Report availability change when managed workload is not ready + When resource "deployment/test-operator" reports as not ready + Then ClusterExtension reports Available as False with Reason ProbeFailure + And ClusterExtensionRevision "${NAME}-1" reports Available as False with Reason ProbeFailure + + @BoxcutterRuntime + Scenario: Report availability change when managed workload restores its readiness + Given resource "deployment/test-operator" reports as not ready + And ClusterExtension reports Available as False with Reason ProbeFailure + And ClusterExtensionRevision "${NAME}-1" reports Available as False with Reason ProbeFailure + When resource "deployment/test-operator" reports as ready + Then ClusterExtension is available + And ClusterExtensionRevision "${NAME}-1" reports Available as True with Reason ProbesSucceeded \ No newline at end of file diff --git a/test/e2e/features/update.feature b/test/e2e/features/update.feature new file mode 100644 index 000000000..dee45e32a --- /dev/null +++ b/test/e2e/features/update.feature @@ -0,0 +1,244 @@ +Feature: Update ClusterExtension + + As an OLM user I would like to update a ClusterExtension from a catalog + or get appropriate information in case of an error.
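+ # Assumption: upgradeConstraintPolicy: SelfCertified in the scenarios below opts out of + # successor-edge enforcement, matching UpgradeConstraintPolicySelfCertified in the deleted + # TestClusterExtensionForceInstallNonSuccessorVersion.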
+ + Background: + Given OLM is available + And ClusterCatalog "test" serves bundles + And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + + Scenario: Update to a successor version + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is updated to version "1.0.1" + Then ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.0.1" is installed in version "1.0.1" + + Scenario: Cannot update extension to non-successor version + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.2.0 + """ + Then ClusterExtension reports Progressing as True with Reason Retrying and Message: + """ + error upgrading from currently installed version "1.0.0": no bundles found for package "test" matching version "1.2.0" + """ + + Scenario: Force update to non-successor version + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.2.0 + upgradeConstraintPolicy: SelfCertified + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + + @catalog-updates + Scenario: Auto update when new version becomes available in the new catalog image ref + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension is rolled out + And
ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + When ClusterCatalog "test" is updated to version "v2" + Then bundle "test-operator.1.3.0" is installed in version "1.3.0" + + Scenario: Auto update when new version becomes available in the same catalog image ref + Given "test" catalog image version "v1" is also tagged as "latest" + And ClusterCatalog "test" is updated to version "latest" + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension is rolled out + And ClusterExtension is available + And bundle "test-operator.1.2.0" is installed in version "1.2.0" + When ClusterCatalog "test" image version "v2" is also tagged as "latest" + Then bundle "test-operator.1.3.0" is installed in version "1.3.0" + + @BoxcutterRuntime + Scenario: Each update creates a new revision + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + upgradeConstraintPolicy: SelfCertified + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is updated to version "1.2.0" + Then bundle "test-operator.1.2.0" is installed in version "1.2.0" + And ClusterExtension is rolled out + And ClusterExtension is available + And ClusterExtension reports "${NAME}-2" as active revision + And ClusterExtensionRevision "${NAME}-2" reports Progressing as True with Reason Succeeded + And ClusterExtensionRevision "${NAME}-2" reports Available as True with Reason ProbesSucceeded + And ClusterExtensionRevision "${NAME}-1" is archived + + @BoxcutterRuntime + Scenario: Report all active revisions on ClusterExtension + Given ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + version: 1.0.0 + upgradeConstraintPolicy: SelfCertified + """ + And ClusterExtension is rolled out + And ClusterExtension is available + When ClusterExtension is updated to version "1.0.2" + Then ClusterExtension reports "${NAME}-1, ${NAME}-2" as active revisions + And ClusterExtensionRevision "${NAME}-2" reports Progressing as True with Reason Succeeded + And ClusterExtensionRevision "${NAME}-2" reports Available as False with Reason ProbeFailure + diff --git a/test/e2e/features_test.go b/test/e2e/features_test.go new file mode 100644 index 000000000..706c822ef --- /dev/null +++ b/test/e2e/features_test.go @@ -0,0 +1,75 @@ +package e2e + +import ( + "fmt" + "log" + "os" + "testing" + + "github.com/cucumber/godog" + "github.com/cucumber/godog/colors" + "github.com/spf13/pflag" + + testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" + "github.com/operator-framework/operator-controller/test/e2e/steps" +) + +var opts = 
godog.Options{ + Format: "pretty", + Paths: []string{"features"}, + Output: colors.Colored(os.Stdout), + Concurrency: 1, + NoColors: true, +} + +func init() { + godog.BindCommandLineFlags("godog.", &opts) +} + +func TestMain(m *testing.M) { + // parse CLI arguments + pflag.Parse() + opts.Paths = pflag.Args() + + // run tests + sc := godog.TestSuite{ + TestSuiteInitializer: InitializeSuite, + ScenarioInitializer: InitializeScenario, + Options: &opts, + }.Run() + + if st := m.Run(); st > sc { + sc = st + } + switch sc { + // 0 - success + case 0: + + path := os.Getenv("E2E_SUMMARY_OUTPUT") + if path == "" { + fmt.Println("Note: E2E_SUMMARY_OUTPUT is unset; skipping summary generation") + } else { + if err := testutil.PrintSummary(path); err != nil { + // Fail the run if alerts are found + fmt.Printf("%v", err) + os.Exit(1) + } + } + return + + // 1 - failed + // 2 - command line usage error + // 128 - or higher, os signal related error exit codes + default: + log.Fatalf("non-zero status returned (%d), failed to run feature tests", sc) + } +} + +func InitializeSuite(tc *godog.TestSuiteContext) { + tc.BeforeSuite(steps.BeforeSuite) +} + +func InitializeScenario(sc *godog.ScenarioContext) { + steps.RegisterSteps(sc) + steps.RegisterHooks(sc) +} diff --git a/test/e2e/metrics_test.go b/test/e2e/metrics_test.go deleted file mode 100644 index e1fbb90f3..000000000 --- a/test/e2e/metrics_test.go +++ /dev/null @@ -1,297 +0,0 @@ -// Package e2e contains end-to-end tests to verify that the metrics endpoints -// for both components. Metrics are exported and accessible by authorized users through -// RBAC and ServiceAccount tokens. -// -// These tests perform the following steps: -// 1. Create a ClusterRoleBinding to grant necessary permissions for accessing metrics. -// 2. Generate a ServiceAccount token for authentication. -// 3. Deploy a curl pod to interact with the metrics endpoint. -// 4. Wait for the curl pod to become ready. -// 5. Execute a curl command from the pod to validate the metrics endpoint. -// 6. Clean up all resources created during the test, such as the ClusterRoleBinding and curl pod. 
-// -//nolint:gosec -package e2e - -import ( - "bytes" - "context" - "fmt" - "io" - "os/exec" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/util/rand" - - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" -) - -// TestOperatorControllerMetricsExportedEndpoint verifies that the metrics endpoint for the operator controller -func TestOperatorControllerMetricsExportedEndpoint(t *testing.T) { - client := testutil.FindK8sClient(t) - curlNamespace := createRandomNamespace(t, client) - componentNamespace := getComponentNamespace(t, client, "control-plane=operator-controller-controller-manager") - - config := NewMetricsTestConfig( - client, - curlNamespace, - componentNamespace, - "operator-controller-metrics-reader", - "operator-controller-metrics-binding", - "operator-controller-metrics-reader", - "oper-curl-metrics", - "app.kubernetes.io/name=operator-controller", - operatorControllerMetricsPort, - ) - - config.run(t) -} - -// TestCatalogdMetricsExportedEndpoint verifies that the metrics endpoint for catalogd -func TestCatalogdMetricsExportedEndpoint(t *testing.T) { - client := testutil.FindK8sClient(t) - curlNamespace := createRandomNamespace(t, client) - componentNamespace := getComponentNamespace(t, client, "control-plane=catalogd-controller-manager") - - config := NewMetricsTestConfig( - client, - curlNamespace, - componentNamespace, - "catalogd-metrics-reader", - "catalogd-metrics-binding", - "catalogd-metrics-reader", - "catalogd-curl-metrics", - "app.kubernetes.io/name=catalogd", - catalogdMetricsPort, - ) - - config.run(t) -} - -// MetricsTestConfig holds the necessary configurations for testing metrics endpoints. -type MetricsTestConfig struct { - client string - namespace string - componentNamespace string - clusterRole string - clusterBinding string - serviceAccount string - curlPodName string - componentSelector string - metricsPort int -} - -// NewMetricsTestConfig initializes a new MetricsTestConfig. 
-func NewMetricsTestConfig(client, namespace, componentNamespace, clusterRole, clusterBinding, serviceAccount, curlPodName, componentSelector string, metricsPort int) *MetricsTestConfig { - return &MetricsTestConfig{ - client: client, - namespace: namespace, - componentNamespace: componentNamespace, - clusterRole: clusterRole, - clusterBinding: clusterBinding, - serviceAccount: serviceAccount, - curlPodName: curlPodName, - componentSelector: componentSelector, - metricsPort: metricsPort, - } -} - -// run will execute all steps of those tests -func (c *MetricsTestConfig) run(t *testing.T) { - defer c.cleanup(t) - - c.createMetricsClusterRoleBinding(t) - token := c.getServiceAccountToken(t) - c.createCurlMetricsPod(t) - c.validate(t, token) -} - -// createMetricsClusterRoleBinding to binding and expose the metrics -func (c *MetricsTestConfig) createMetricsClusterRoleBinding(t *testing.T) { - t.Logf("Creating ClusterRoleBinding %s for %s in namespace %s", c.clusterBinding, c.serviceAccount, c.namespace) - cmd := exec.Command(c.client, "create", "clusterrolebinding", c.clusterBinding, - "--clusterrole="+c.clusterRole, - "--serviceaccount="+c.namespace+":"+c.serviceAccount) - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error creating ClusterRoleBinding: %s", string(output)) -} - -// getServiceAccountToken return the token requires to have access to the metrics -func (c *MetricsTestConfig) getServiceAccountToken(t *testing.T) string { - t.Logf("Creating ServiceAccount %q in namespace %q", c.serviceAccount, c.namespace) - output, err := exec.Command(c.client, "create", "serviceaccount", c.serviceAccount, "--namespace="+c.namespace).CombinedOutput() - require.NoError(t, err, "Error creating service account: %v", string(output)) - - t.Logf("Generating ServiceAccount token for %q in namespace %q", c.serviceAccount, c.namespace) - cmd := exec.Command(c.client, "create", "token", c.serviceAccount, "--namespace", c.namespace) - tokenOutput, tokenCombinedOutput, err := stdoutAndCombined(cmd) - require.NoError(t, err, "Error creating token: %s", string(tokenCombinedOutput)) - return string(bytes.TrimSpace(tokenOutput)) -} - -// createCurlMetricsPod creates the Pod with curl image to allow check if the metrics are working -func (c *MetricsTestConfig) createCurlMetricsPod(t *testing.T) { - t.Logf("Creating curl pod (%s/%s) to validate the metrics endpoint", c.namespace, c.curlPodName) - cmd := exec.Command(c.client, "run", c.curlPodName, - "--image=quay.io/curl/curl:8.15.0", - "--namespace", c.namespace, - "--restart=Never", - "--overrides", `{ - "spec": { - "terminationGradePeriodSeconds": 0, - "containers": [{ - "name": "curl", - "image": "quay.io/curl/curl:8.15.0", - "command": ["sh", "-c", "sleep 3600"], - "securityContext": { - "allowPrivilegeEscalation": false, - "capabilities": {"drop": ["ALL"]}, - "runAsNonRoot": true, - "runAsUser": 1000, - "seccompProfile": {"type": "RuntimeDefault"} - } - }], - "serviceAccountName": "`+c.serviceAccount+`" - } - }`) - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error creating curl pod: %s", string(output)) -} - -// validate verifies if is possible to access the metrics from all pods -func (c *MetricsTestConfig) validate(t *testing.T, token string) { - t.Log("Waiting for the curl pod to be ready") - waitCmd := exec.Command(c.client, "wait", "--for=condition=Ready", "pod", c.curlPodName, "--namespace", c.namespace, "--timeout=60s") - waitOutput, waitErr := waitCmd.CombinedOutput() - require.NoError(t, waitErr, "Error waiting 
for curl pod to be ready: %s", string(waitOutput)) - - // Get all pod IPs for the component - podIPs := c.getComponentPodIPs(t) - require.NotEmpty(t, podIPs, "No pod IPs found for component") - t.Logf("Found %d pod(s) to scrape metrics from", len(podIPs)) - - // Validate metrics endpoint for each pod - for i, podIP := range podIPs { - // Build metrics URL with pod FQDN: ..pod.cluster.local - // Convert IP dots to dashes (e.g., 10.244.0.11 -> 10-244-0-11) - podIPDashes := strings.ReplaceAll(podIP, ".", "-") - metricsURL := fmt.Sprintf("https://%s.%s.pod.cluster.local:%d/metrics", podIPDashes, c.componentNamespace, c.metricsPort) - t.Logf("Validating metrics endpoint for pod %d/%d: %s", i+1, len(podIPs), metricsURL) - - curlCmd := exec.Command(c.client, "exec", c.curlPodName, "--namespace", c.namespace, "--", - "curl", "-v", "-k", "-H", "Authorization: Bearer "+token, metricsURL) - output, err := curlCmd.CombinedOutput() - require.NoError(t, err, "Error calling metrics endpoint %s: %s", metricsURL, string(output)) - require.Contains(t, string(output), "200 OK", "Metrics endpoint %s did not return 200 OK", metricsURL) - t.Logf("Successfully scraped metrics from pod %d/%d", i+1, len(podIPs)) - } -} - -// cleanup removes the created resources. Uses a context with timeout to prevent hangs. -func (c *MetricsTestConfig) cleanup(t *testing.T) { - type objDesc struct { - resourceName string - name string - namespace string - } - objects := []objDesc{ - {"clusterrolebinding", c.clusterBinding, ""}, - {"pod", c.curlPodName, c.namespace}, - {"serviceaccount", c.serviceAccount, c.namespace}, - {"namespace", c.namespace, ""}, - } - - t.Log("Cleaning up resources") - for _, obj := range objects { - args := []string{"delete", obj.resourceName, obj.name, "--ignore-not-found=true", "--force"} - if obj.namespace != "" { - args = append(args, "--namespace", obj.namespace) - } - output, err := exec.Command(c.client, args...).CombinedOutput() - require.NoError(t, err, "Error deleting %q %q in namespace %q: %v", obj.resourceName, obj.name, obj.namespace, string(output)) - } - - // Create a context with a 60-second timeout. - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() - - for _, obj := range objects { - err := waitForDeletion(ctx, c.client, obj.resourceName, obj.name, obj.namespace) - require.NoError(t, err, "Error deleting %q %q in namespace %q", obj.resourceName, obj.name, obj.namespace) - t.Logf("Successfully deleted %q %q in namespace %q", obj.resourceName, obj.name, obj.namespace) - } -} - -// waitForDeletion uses "kubectl wait" to block until the specified resource is deleted -// or until the 60-second timeout is reached. -func waitForDeletion(ctx context.Context, client, resourceType, resourceName, resourceNamespace string) error { - args := []string{"wait", "--for=delete", "--timeout=60s", resourceType, resourceName} - if resourceNamespace != "" { - args = append(args, "--namespace", resourceNamespace) - } - cmd := exec.CommandContext(ctx, client, args...) 
- output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("error waiting for deletion of %s %s: %v, output: %s", resourceType, resourceName, err, string(output)) - } - return nil -} - -// createRandomNamespace creates a random namespace -func createRandomNamespace(t *testing.T, client string) string { - nsName := fmt.Sprintf("testns-%s", rand.String(8)) - - cmd := exec.Command(client, "create", "namespace", nsName) - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error creating namespace: %s", string(output)) - - return nsName -} - -// getComponentNamespace returns the namespace where operator-controller or catalogd is running -func getComponentNamespace(t *testing.T, client, selector string) string { - cmd := exec.Command(client, "get", "pods", "--all-namespaces", "--selector="+selector, "--output=jsonpath={.items[0].metadata.namespace}") - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error determining namespace: %s", string(output)) - - namespace := string(bytes.TrimSpace(output)) - if namespace == "" { - t.Fatal("No namespace found for selector " + selector) - } - return namespace -} - -// getComponentPodIPs returns the IP addresses of all pods matching the component selector -func (c *MetricsTestConfig) getComponentPodIPs(t *testing.T) []string { - cmd := exec.Command(c.client, "get", "pods", - "--namespace="+c.componentNamespace, - "--selector="+c.componentSelector, - "--output=jsonpath={.items[*].status.podIP}") - output, err := cmd.CombinedOutput() - require.NoError(t, err, "Error getting pod IPs: %s", string(output)) - - podIPsStr := string(bytes.TrimSpace(output)) - if podIPsStr == "" { - return []string{} - } - - // Split space-separated IPs - fields := bytes.Fields([]byte(podIPsStr)) - ips := make([]string, len(fields)) - for i, field := range fields { - ips[i] = string(field) - } - return ips -} - -func stdoutAndCombined(cmd *exec.Cmd) ([]byte, []byte, error) { - var outOnly, outAndErr bytes.Buffer - allWriter := io.MultiWriter(&outOnly, &outAndErr) - cmd.Stdout = allWriter - cmd.Stderr = &outAndErr - err := cmd.Run() - return outOnly.Bytes(), outAndErr.Bytes(), err -} diff --git a/test/e2e/network_policy_test.go b/test/e2e/network_policy_test.go deleted file mode 100644 index 8e0465f41..000000000 --- a/test/e2e/network_policy_test.go +++ /dev/null @@ -1,379 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" -) - -const ( - minJustificationLength = 40 - catalogdManagerSelector = "control-plane=catalogd-controller-manager" - operatorManagerSelector = "control-plane=operator-controller-controller-manager" - catalogdMetricsPort = 7443 - catalogdWebhookPort = 9443 - catalogServerPort = 8443 - operatorControllerMetricsPort = 8443 -) - -type portWithJustification struct { - port []networkingv1.NetworkPolicyPort - justification string -} - -// ingressRule defines a k8s IngressRule, along with a justification. -type ingressRule struct { - ports []portWithJustification - from []networkingv1.NetworkPolicyPeer -} - -// egressRule defines a k8s egressRule, along with a justification. 
-type egressRule struct { - ports []portWithJustification - to []networkingv1.NetworkPolicyPeer -} - -// AllowedPolicyDefinition defines the expected structure and justifications for a NetworkPolicy. -type allowedPolicyDefinition struct { - selector metav1.LabelSelector - policyTypes []networkingv1.PolicyType - ingressRule ingressRule - egressRule egressRule - denyAllIngressJustification string // Justification if Ingress is in PolicyTypes and IngressRules is empty - denyAllEgressJustification string // Justification if Egress is in PolicyTypes and EgressRules is empty -} - -var denyAllPolicySpec = allowedPolicyDefinition{ - selector: metav1.LabelSelector{}, // Empty selector, matches all pods - policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, - // No IngressRules means deny all ingress if PolicyTypeIngress is present - // No EgressRules means deny all egress if PolicyTypeEgress is present - denyAllIngressJustification: "Denies all ingress traffic to pods selected by this policy by default, unless explicitly allowed by other policy rules, ensuring a baseline secure posture.", - denyAllEgressJustification: "Denies all egress traffic from pods selected by this policy by default, unless explicitly allowed by other policy rules, minimizing potential exfiltration paths.", -} - -var prometheuSpec = allowedPolicyDefinition{ - selector: metav1.LabelSelector{MatchLabels: map[string]string{"app.kubernetes.io/name": "prometheus"}}, - policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, - ingressRule: ingressRule{ - ports: []portWithJustification{ - { - port: nil, - justification: "Allows access to the prometheus pod", - }, - }, - }, - egressRule: egressRule{ - ports: []portWithJustification{ - { - port: nil, - justification: "Allows prometheus to access other pods", - }, - }, - }, -} - -// Ref: https://docs.google.com/document/d/1bHEEWzA65u-kjJFQRUY1iBuMIIM1HbPy4MeDLX4NI3o/edit?usp=sharing -var allowedNetworkPolicies = map[string]allowedPolicyDefinition{ - "catalogd-controller-manager": { - selector: metav1.LabelSelector{MatchLabels: map[string]string{"control-plane": "catalogd-controller-manager"}}, - policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, - ingressRule: ingressRule{ - ports: []portWithJustification{ - { - port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: catalogdMetricsPort}}}, - justification: "Allows Prometheus to scrape metrics from catalogd, which is essential for monitoring its performance and health.", - }, - { - port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: catalogdWebhookPort}}}, - justification: "Permits Kubernetes API server to reach catalogd's mutating admission webhook, ensuring integrity of catalog resources.", - }, - { - port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: catalogServerPort}}}, - justification: "Enables clients (eg. 
operator-controller) to query catalog metadata from catalogd, which is a core function for bundle resolution and operator discovery.", - }, - }, - }, - egressRule: egressRule{ - ports: []portWithJustification{ - { - port: nil, // Empty Ports means allow all egress - justification: "Permits catalogd to fetch catalog images from arbitrary container registries and communicate with the Kubernetes API server for its operational needs.", - }, - }, - }, - }, - "operator-controller-controller-manager": { - selector: metav1.LabelSelector{MatchLabels: map[string]string{"control-plane": "operator-controller-controller-manager"}}, - policyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress}, - ingressRule: ingressRule{ - ports: []portWithJustification{ - { - port: []networkingv1.NetworkPolicyPort{{Protocol: ptr.To(corev1.ProtocolTCP), Port: &intstr.IntOrString{Type: intstr.Int, IntVal: operatorControllerMetricsPort}}}, - justification: "Allows Prometheus to scrape metrics from operator-controller, which is crucial for monitoring its activity, reconciliations, and overall health.", - }, - }, - }, - egressRule: egressRule{ - ports: []portWithJustification{ - { - port: nil, // Empty Ports means allow all egress - justification: "Enables operator-controller to pull bundle images from arbitrary image registries, connect to catalogd's HTTPS server for metadata, and interact with the Kubernetes API server.", - }, - }, - }, - }, -} - -func TestNetworkPolicyJustifications(t *testing.T) { - ctx := context.Background() - - // Validate justifications have min length in the allowedNetworkPolicies definition - for name, policyDef := range allowedNetworkPolicies { - for i, pwj := range policyDef.ingressRule.ports { - require.GreaterOrEqualf(t, len(pwj.justification), minJustificationLength, - "Justification for ingress PortWithJustification entry %d in policy %q is too short: %q", i, name, pwj.justification) - } - for i, pwj := range policyDef.egressRule.ports { // Corrected variable name from 'rule' to 'pwj' - require.GreaterOrEqualf(t, len(pwj.justification), minJustificationLength, - "Justification for egress PortWithJustification entry %d in policy %q is too short: %q", i, name, pwj.justification) - } - if policyDef.denyAllIngressJustification != "" { - require.GreaterOrEqualf(t, len(policyDef.denyAllIngressJustification), minJustificationLength, - "DenyAllIngressJustification for policy %q is too short: %q", name, policyDef.denyAllIngressJustification) - } - if policyDef.denyAllEgressJustification != "" { - require.GreaterOrEqualf(t, len(policyDef.denyAllEgressJustification), minJustificationLength, - "DenyAllEgressJustification for policy %q is too short: %q", name, policyDef.denyAllEgressJustification) - } - } - - clientForComponent := testutil.FindK8sClient(t) - - operatorControllerNamespace := getComponentNamespace(t, clientForComponent, operatorManagerSelector) - catalogDNamespace := getComponentNamespace(t, clientForComponent, catalogdManagerSelector) - - policies := &networkingv1.NetworkPolicyList{} - err := c.List(ctx, policies, client.InNamespace(operatorControllerNamespace)) - require.NoError(t, err, "Failed to list NetworkPolicies in namespace %q", operatorControllerNamespace) - - clusterPolicies := policies.Items - - if operatorControllerNamespace != catalogDNamespace { - policies := &networkingv1.NetworkPolicyList{} - err := c.List(ctx, policies, client.InNamespace(catalogDNamespace)) - require.NoError(t, err, "Failed to list NetworkPolicies in namespace 
%q", catalogDNamespace) - clusterPolicies = append(clusterPolicies, policies.Items...) - - t.Log("Detected dual-namespace configuration, expecting two prefixed 'default-deny-all-traffic' policies.") - allowedNetworkPolicies["catalogd-default-deny-all-traffic"] = denyAllPolicySpec - allowedNetworkPolicies["operator-controller-default-deny-all-traffic"] = denyAllPolicySpec - } else { - t.Log("Detected single-namespace configuration, expecting one 'default-deny-all-traffic' policy.") - allowedNetworkPolicies["default-deny-all-traffic"] = denyAllPolicySpec - t.Log("Detected single-namespace configuration, expecting 'prometheus' policy.") - allowedNetworkPolicies["prometheus"] = prometheuSpec - } - - validatedRegistryPolicies := make(map[string]bool) - - for _, policy := range clusterPolicies { - t.Run(fmt.Sprintf("Policy_%s", strings.ReplaceAll(policy.Name, "-", "_")), func(t *testing.T) { - expectedPolicy, found := allowedNetworkPolicies[policy.Name] - require.Truef(t, found, "NetworkPolicy %q found in cluster but not in allowed registry. Namespace: %s", policy.Name, policy.Namespace) - validatedRegistryPolicies[policy.Name] = true - - // 1. Compare PodSelector - require.True(t, equality.Semantic.DeepEqual(expectedPolicy.selector, policy.Spec.PodSelector), - "PodSelector mismatch for policy %q. Expected: %+v, Got: %+v", policy.Name, expectedPolicy.selector, policy.Spec.PodSelector) - - // 2. Compare PolicyTypes - require.ElementsMatchf(t, expectedPolicy.policyTypes, policy.Spec.PolicyTypes, - "PolicyTypes mismatch for policy %q.", policy.Name) - - // 3. Validate Ingress Rules - hasIngressPolicyType := false - for _, pt := range policy.Spec.PolicyTypes { - if pt == networkingv1.PolicyTypeIngress { - hasIngressPolicyType = true - break - } - } - - if hasIngressPolicyType { - switch len(policy.Spec.Ingress) { - case 0: - validateDenyAllIngress(t, policy.Name, expectedPolicy) - case 1: - validateSingleIngressRule(t, policy.Name, policy.Spec.Ingress[0], expectedPolicy) - default: - require.Failf(t, "Policy %q in cluster has %d ingress rules. Allowed definition supports at most 1 explicit ingress rule.", policy.Name, len(policy.Spec.Ingress)) - } - } else { - validateNoIngress(t, policy.Name, policy, expectedPolicy) - } - - // 4. Validate Egress Rules - hasEgressPolicyType := false - for _, pt := range policy.Spec.PolicyTypes { - if pt == networkingv1.PolicyTypeEgress { - hasEgressPolicyType = true - break - } - } - - if hasEgressPolicyType { - switch len(policy.Spec.Egress) { - case 0: - validateDenyAllEgress(t, policy.Name, expectedPolicy) - case 1: - validateSingleEgressRule(t, policy.Name, policy.Spec.Egress[0], expectedPolicy) - default: - require.Failf(t, "Policy %q in cluster has %d egress rules. Allowed definition supports at most 1 explicit egress rule.", policy.Name, len(policy.Spec.Egress)) - } - } else { - validateNoEgress(t, policy, expectedPolicy) - } - }) - } - - // 5. Ensure all policies in the registry were found in the cluster - require.Len(t, validatedRegistryPolicies, len(allowedNetworkPolicies), - "Mismatch between number of expected policies in registry (%d) and number of policies found & validated in cluster (%d). 
Missing policies from registry: %v", len(allowedNetworkPolicies), len(validatedRegistryPolicies), missingPolicies(allowedNetworkPolicies, validatedRegistryPolicies)) -} - -func missingPolicies(expected map[string]allowedPolicyDefinition, actual map[string]bool) []string { - missing := []string{} - for k := range expected { - if !actual[k] { - missing = append(missing, k) - } - } - return missing -} - -// validateNoEgress confirms that a policy which does not have spec.PolicyType=Egress specified -// has no corresponding egress rules or expectations defined. -func validateNoEgress(t *testing.T, policy networkingv1.NetworkPolicy, expectedPolicy allowedPolicyDefinition) { - // Policy is NOT expected to affect Egress traffic (no Egress in PolicyTypes) - // Expected: Cluster has no egress rules; Registry has no DenyAllEgressJustification and empty EgressRule. - require.Emptyf(t, policy.Spec.Egress, - "Policy %q: Cluster does not have Egress PolicyType, but has Egress rules defined.", policy.Name) - require.Emptyf(t, expectedPolicy.denyAllEgressJustification, - "Policy %q: Cluster does not have Egress PolicyType. Registry's DenyAllEgressJustification is not empty.", policy.Name) - require.Emptyf(t, expectedPolicy.egressRule.ports, - "Policy %q: Cluster does not have Egress PolicyType. Registry's EgressRule.Ports is not empty.", policy.Name) - require.Emptyf(t, expectedPolicy.egressRule.to, - "Policy %q: Cluster does not have Egress PolicyType. Registry's EgressRule.To is not empty.", policy.Name) -} - -// validateDenyAllEgress confirms that a policy with Egress PolicyType but no explicit rules -// correctly corresponds to a "deny all" expectation. -func validateDenyAllEgress(t *testing.T, policyName string, expectedPolicy allowedPolicyDefinition) { - // Cluster: PolicyType Egress is present, but no explicit egress rules -> Deny All Egress by this policy. - // Expected: DenyAllEgressJustification is set; EgressRule.Ports and .To are empty. - require.NotEmptyf(t, expectedPolicy.denyAllEgressJustification, - "Policy %q: Cluster has Egress PolicyType but no rules (deny all). Registry's DenyAllEgressJustification is empty.", policyName) - require.Emptyf(t, expectedPolicy.egressRule.ports, - "Policy %q: Cluster has Egress PolicyType but no rules (deny all). Registry's EgressRule.Ports is not empty.", policyName) - require.Emptyf(t, expectedPolicy.egressRule.to, - "Policy %q: Cluster has Egress PolicyType but no rules (deny all). Registry's EgressRule.To is not empty.", policyName) -} - -// validateSingleEgressRule validates a policy that has exactly one explicit egress rule, -// distinguishing between "allow-all" and more specific rules. -func validateSingleEgressRule(t *testing.T, policyName string, clusterEgressRule networkingv1.NetworkPolicyEgressRule, expectedPolicy allowedPolicyDefinition) { - // Cluster: PolicyType Egress is present, and there's one explicit egress rule. - // Expected: DenyAllEgressJustification is empty; EgressRule matches the cluster's rule. - expectedEgressRule := expectedPolicy.egressRule - - require.Emptyf(t, expectedPolicy.denyAllEgressJustification, - "Policy %q: Cluster has a specific Egress rule. 
Registry's DenyAllEgressJustification should be empty.", policyName) - - isClusterRuleAllowAllPorts := len(clusterEgressRule.Ports) == 0 - isClusterRuleAllowAllPeers := len(clusterEgressRule.To) == 0 - - if isClusterRuleAllowAllPorts && isClusterRuleAllowAllPeers { // Handles egress: [{}] - allow all ports to all peers - require.Lenf(t, expectedEgressRule.ports, 1, - "Policy %q (allow-all egress): Expected EgressRule.Ports to have 1 justification entry, got %d", policyName, len(expectedEgressRule.ports)) - if len(expectedEgressRule.ports) == 1 { // Guard against panic - require.Nilf(t, expectedEgressRule.ports[0].port, - "Policy %q (allow-all egress): Expected EgressRule.Ports[0].Port to be nil, got %+v", policyName, expectedEgressRule.ports[0].port) - } - require.Conditionf(t, func() bool { return len(expectedEgressRule.to) == 0 }, - "Policy %q (allow-all egress): Expected EgressRule.To to be empty for allow-all peers, got %+v", policyName, expectedEgressRule.to) - } else { - // Specific egress rule (not the simple allow-all ports and allow-all peers) - require.True(t, equality.Semantic.DeepEqual(expectedEgressRule.to, clusterEgressRule.To), - "Policy %q, Egress Rule: 'To' mismatch.\nExpected: %+v\nGot: %+v", policyName, expectedEgressRule.to, clusterEgressRule.To) - - var allExpectedPortsFromPwJ []networkingv1.NetworkPolicyPort - for _, pwj := range expectedEgressRule.ports { - allExpectedPortsFromPwJ = append(allExpectedPortsFromPwJ, pwj.port...) - } - require.ElementsMatchf(t, allExpectedPortsFromPwJ, clusterEgressRule.Ports, - "Policy %q, Egress Rule: 'Ports' mismatch (aggregated from PortWithJustification). Expected: %+v, Got: %+v", policyName, allExpectedPortsFromPwJ, clusterEgressRule.Ports) - } -} - -// validateNoIngress confirms that a policy which does not have the Ingress PolicyType -// has no corresponding ingress rules or expectations defined. -func validateNoIngress(t *testing.T, policyName string, clusterPolicy networkingv1.NetworkPolicy, expectedPolicy allowedPolicyDefinition) { - // Policy is NOT expected to affect Ingress traffic (no Ingress in PolicyTypes) - // Expected: Cluster has no ingress rules; Registry has no DenyAllIngressJustification and empty IngressRule. - require.Emptyf(t, clusterPolicy.Spec.Ingress, - "Policy %q: Cluster does not have Ingress PolicyType, but has Ingress rules defined.", policyName) - require.Emptyf(t, expectedPolicy.denyAllIngressJustification, - "Policy %q: Cluster does not have Ingress PolicyType. Registry's DenyAllIngressJustification is not empty.", policyName) - require.Emptyf(t, expectedPolicy.ingressRule.ports, - "Policy %q: Cluster does not have Ingress PolicyType. Registry's IngressRule.Ports is not empty.", policyName) - require.Emptyf(t, expectedPolicy.ingressRule.from, - "Policy %q: Cluster does not have Ingress PolicyType. Registry's IngressRule.From is not empty.", policyName) -} - -// validateDenyAllIngress confirms that a policy with Ingress PolicyType but no explicit rules -// correctly corresponds to a "deny all" expectation. -func validateDenyAllIngress(t *testing.T, policyName string, expectedPolicy allowedPolicyDefinition) { - // Cluster: PolicyType Ingress is present, but no explicit ingress rules -> Deny All Ingress by this policy. - // Expected: DenyAllIngressJustification is set; IngressRule.Ports and .From are empty. - require.NotEmptyf(t, expectedPolicy.denyAllIngressJustification, - "Policy %q: Cluster has Ingress PolicyType but no rules (deny all). 
Registry's DenyAllIngressJustification is empty.", policyName) - require.Emptyf(t, expectedPolicy.ingressRule.ports, - "Policy %q: Cluster has Ingress PolicyType but no rules (deny all). Registry's IngressRule.Ports is not empty.", policyName) - require.Emptyf(t, expectedPolicy.ingressRule.from, - "Policy %q: Cluster has Ingress PolicyType but no rules (deny all). Registry's IngressRule.From is not empty.", policyName) -} - -// validateSingleIngressRule validates a policy that has exactly one explicit ingress rule. -func validateSingleIngressRule(t *testing.T, policyName string, clusterIngressRule networkingv1.NetworkPolicyIngressRule, expectedPolicy allowedPolicyDefinition) { - // Cluster: PolicyType Ingress is present, and there's one explicit ingress rule. - // Expected: DenyAllIngressJustification is empty; IngressRule matches the cluster's rule. - expectedIngressRule := expectedPolicy.ingressRule - - require.Emptyf(t, expectedPolicy.denyAllIngressJustification, - "Policy %q: Cluster has a specific Ingress rule. Registry's DenyAllIngressJustification should be empty.", policyName) - - // Compare 'From' - require.True(t, equality.Semantic.DeepEqual(expectedIngressRule.from, clusterIngressRule.From), - "Policy %q, Ingress Rule: 'From' mismatch.\nExpected: %+v\nGot: %+v", policyName, expectedIngressRule.from, clusterIngressRule.From) - - // Compare 'Ports' by aggregating the ports from our justified structure - var allExpectedPortsFromPwJ []networkingv1.NetworkPolicyPort - for _, pwj := range expectedIngressRule.ports { - allExpectedPortsFromPwJ = append(allExpectedPortsFromPwJ, pwj.port...) - } - require.ElementsMatchf(t, allExpectedPortsFromPwJ, clusterIngressRule.Ports, - "Policy %q, Ingress Rule: 'Ports' mismatch (aggregated from PortWithJustification). Expected: %+v, Got: %+v", policyName, allExpectedPortsFromPwJ, clusterIngressRule.Ports) -} diff --git a/test/e2e/single_namespace_support_test.go b/test/e2e/single_namespace_support_test.go deleted file mode 100644 index 190e786ba..000000000 --- a/test/e2e/single_namespace_support_test.go +++ /dev/null @@ -1,412 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/ptr" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" - . 
"github.com/operator-framework/operator-controller/test/helpers" -) - -const ( - soNsFlag = "SingleOwnNamespaceInstallSupport" -) - -func TestClusterExtensionSingleNamespaceSupport(t *testing.T) { - SkipIfFeatureGateDisabled(t, soNsFlag) - t.Log("Test support for cluster extension config") - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating install namespace, watch namespace and necessary rbac resources") - namespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator", - }, - } - require.NoError(t, c.Create(t.Context(), &namespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &namespace)) - }) - - watchNamespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-target", - }, - } - require.NoError(t, c.Create(t.Context(), &watchNamespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &watchNamespace)) - }) - - serviceAccount := corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-installer", - Namespace: namespace.GetName(), - }, - } - require.NoError(t, c.Create(t.Context(), &serviceAccount)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &serviceAccount)) - }) - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-installer", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: corev1.GroupName, - Name: serviceAccount.GetName(), - Namespace: serviceAccount.GetNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "ClusterRole", - Name: "cluster-admin", - }, - } - require.NoError(t, c.Create(t.Context(), clusterRoleBinding)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterRoleBinding)) - }) - - t.Log("By creating the test-catalog ClusterCatalog") - extensionCatalog := &ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-catalog", - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: ocv1.SourceTypeImage, - Image: &ocv1.ImageSource{ - Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")), - PollIntervalMinutes: ptr.To(1), - }, - }, - }, - } - require.NoError(t, c.Create(t.Context(), extensionCatalog)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), extensionCatalog)) - }) - - t.Log("By waiting for the catalog to serve its metadata") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By attempting to install the single-namespace-operator ClusterExtension without any configuration") - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: "single-namespace-operator-extension", - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "single-namespace-operator", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - 
Namespace: namespace.GetName(), - ServiceAccount: ocv1.ServiceAccountReference{ - Name: serviceAccount.GetName(), - }, - }, - } - require.NoError(t, c.Create(t.Context(), clusterExtension)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterExtension)) - }) - - t.Log("By waiting for single-namespace-operator extension installation to fail") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Contains(ct, cond.Message, `required field "watchNamespace" is missing`) - }, pollDuration, pollInterval) - - t.Log("By updating the ClusterExtension configuration with a watchNamespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension)) - clusterExtension.Spec.Config = &ocv1.ClusterExtensionConfig{ - ConfigType: ocv1.ClusterExtensionConfigTypeInline, - Inline: &apiextensionsv1.JSON{ - Raw: []byte(fmt.Sprintf(`{"watchNamespace": "%s"}`, watchNamespace.GetName())), - }, - } - require.NoError(t, c.Update(t.Context(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By waiting for single-namespace-operator extension to be installed successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotNil(ct, clusterExtension.Status.Install) - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By ensuring the single-namespace-operator deployment is correctly configured to watch the watch namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - deployment := &appsv1.Deployment{} - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "single-namespace-operator"}, deployment)) - require.NotNil(ct, deployment.Spec.Template.GetAnnotations()) - require.Equal(ct, watchNamespace.GetName(), deployment.Spec.Template.GetAnnotations()["olm.targetNamespaces"]) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionOwnNamespaceSupport(t *testing.T) { - SkipIfFeatureGateDisabled(t, soNsFlag) - t.Log("Test support for cluster extension with OwnNamespace install mode support") - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating install namespace, watch namespace and necessary rbac resources") - namespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator", - }, - } - require.NoError(t, c.Create(t.Context(), &namespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &namespace)) - }) - - serviceAccount := corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator-installer", - Namespace: namespace.GetName(), - }, - } - require.NoError(t, c.Create(t.Context(), 
&serviceAccount)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &serviceAccount)) - }) - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator-installer", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: corev1.GroupName, - Name: serviceAccount.GetName(), - Namespace: serviceAccount.GetNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "ClusterRole", - Name: "cluster-admin", - }, - } - require.NoError(t, c.Create(t.Context(), clusterRoleBinding)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterRoleBinding)) - }) - - t.Log("By creating the test-catalog ClusterCatalog") - extensionCatalog := &ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-catalog", - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: ocv1.SourceTypeImage, - Image: &ocv1.ImageSource{ - Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")), - PollIntervalMinutes: ptr.To(1), - }, - }, - }, - } - require.NoError(t, c.Create(t.Context(), extensionCatalog)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), extensionCatalog)) - }) - - t.Log("By waiting for the catalog to serve its metadata") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By attempting to install the own-namespace-operator ClusterExtension without any configuration") - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: "own-namespace-operator-extension", - }, - Spec: ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "own-namespace-operator", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: namespace.GetName(), - ServiceAccount: ocv1.ServiceAccountReference{ - Name: serviceAccount.GetName(), - }, - }, - } - require.NoError(t, c.Create(t.Context(), clusterExtension)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterExtension)) - }) - - t.Log("By waiting for own-namespace-operator extension installation to fail") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Contains(ct, cond.Message, `required field "watchNamespace" is missing`) - }, pollDuration, pollInterval) - - t.Log("By updating the ClusterExtension configuration with a watchNamespace other than the install namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension)) - clusterExtension.Spec.Config = 
&ocv1.ClusterExtensionConfig{ - ConfigType: ocv1.ClusterExtensionConfigTypeInline, - Inline: &apiextensionsv1.JSON{ - Raw: []byte(`{"watchNamespace": "some-namespace"}`), - }, - } - require.NoError(t, c.Update(t.Context(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By waiting for own-namespace-operator extension installation to fail") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonRetrying, cond.Reason) - require.Contains(ct, cond.Message, "invalid ClusterExtension configuration") - require.Contains(ct, cond.Message, fmt.Sprintf("watchNamespace must be \"%s\"", clusterExtension.Spec.Namespace)) - require.Contains(ct, cond.Message, "OwnNamespace install mode") - }, pollDuration, pollInterval) - - t.Log("By updating the ClusterExtension configuration with a watchNamespace = install namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(t, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.GetName()}, clusterExtension)) - clusterExtension.Spec.Config = &ocv1.ClusterExtensionConfig{ - ConfigType: ocv1.ClusterExtensionConfigTypeInline, - Inline: &apiextensionsv1.JSON{ - Raw: []byte(fmt.Sprintf(`{"watchNamespace": "%s"}`, clusterExtension.Spec.Namespace)), - }, - } - require.NoError(t, c.Update(t.Context(), clusterExtension)) - }, pollDuration, pollInterval) - - t.Log("By waiting for own-namespace-operator extension to be installed successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotNil(ct, clusterExtension.Status.Install) - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By ensuring the own-namespace-operator deployment is correctly configured to watch the watch namespace") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - deployment := &appsv1.Deployment{} - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "own-namespace-operator"}, deployment)) - require.NotNil(ct, deployment.Spec.Template.GetAnnotations()) - require.Equal(ct, clusterExtension.Spec.Namespace, deployment.Spec.Template.GetAnnotations()["olm.targetNamespaces"]) - }, pollDuration, pollInterval) -} - -func TestClusterExtensionVersionUpdate(t *testing.T) { - SkipIfFeatureGateDisabled(t, soNsFlag) - t.Log("When a cluster extension is installed from a catalog") - t.Log("When resolving upgrade edges") - - clusterExtension, extensionCatalog, sa, ns := TestInit(t) - defer TestCleanup(t, extensionCatalog, clusterExtension, sa, ns) - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - t.Log("By creating an ClusterExtension at a specified version") - clusterExtension.Spec = ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: 
"test", - Version: "1.0.0", - }, - }, - Namespace: ns.Name, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: sa.Name, - }, - } - require.NoError(t, c.Create(context.Background(), clusterExtension)) - t.Log("By eventually reporting a successful resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("It allows to upgrade the ClusterExtension to a non-successor version") - t.Log("By forcing update of ClusterExtension resource to a non-successor version") - // 1.2.0 does not replace/skip/skipRange 1.0.0. - clusterExtension.Spec.Source.Catalog.Version = "1.2.0" - clusterExtension.Spec.Source.Catalog.UpgradeConstraintPolicy = ocv1.UpgradeConstraintPolicySelfCertified - require.NoError(t, c.Update(context.Background(), clusterExtension)) - t.Log("By eventually reporting a satisfiable resolution") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeProgressing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - }, pollDuration, pollInterval) - t.Log("We should have two ClusterExtensionRevision resources") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - cerList := &ocv1.ClusterExtensionRevisionList{} - require.NoError(ct, c.List(context.Background(), cerList)) - require.Len(ct, cerList.Items, 2) - }, pollDuration, pollInterval) -} diff --git a/test/e2e/steps/hooks.go b/test/e2e/steps/hooks.go new file mode 100644 index 000000000..c91072947 --- /dev/null +++ b/test/e2e/steps/hooks.go @@ -0,0 +1,166 @@ +package steps + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" + + "github.com/cucumber/godog" + "github.com/go-logr/logr" + "github.com/spf13/pflag" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/component-base/featuregate" + "k8s.io/klog/v2/textlogger" + + "github.com/operator-framework/operator-controller/internal/operator-controller/features" +) + +type resource struct { + name string + kind string +} + +type scenarioContext struct { + id string + namespace string + clusterExtensionName string + removedResources []unstructured.Unstructured + backGroundCmds []*exec.Cmd + metricsResponse map[string]string +} + +type contextKey string + +const ( + scenarioContextKey contextKey = "scenario-context" +) + +var ( + devMode = false + featureGates = map[featuregate.Feature]bool{ + features.WebhookProviderCertManager: true, + features.PreflightPermissions: false, + features.SingleOwnNamespaceInstallSupport: true, + features.SyntheticPermissions: false, + features.WebhookProviderOpenshiftServiceCA: false, + features.HelmChartSupport: false, + features.BoxcutterRuntime: false, + } + logger logr.Logger +) + +func init() { + flagSet := pflag.CommandLine + flagSet.BoolVar(&devMode, "log.debug", false, "print debug log level") +} + +func RegisterHooks(sc *godog.ScenarioContext) { + sc.Before(CheckFeatureTags) 
+	sc.Before(CreateScenarioContext)
+
+	sc.After(ScenarioCleanup)
+}
+
+// BeforeSuite discovers the OLM manager deployment and records the feature gates it was started with.
+func BeforeSuite() {
+	if devMode {
+		logger = textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(1)))
+	} else {
+		logger = textlogger.NewLogger(textlogger.NewConfig())
+	}
+
+	raw, err := k8sClient("get", "deployments", "-A", "-l", "app.kubernetes.io/part-of=olm", "-o", "jsonpath={.items}")
+	if err != nil {
+		panic(fmt.Errorf("failed to get OLM deployments: %v", err))
+	}
+	dl := []appsv1.Deployment{}
+	if err := json.Unmarshal([]byte(raw), &dl); err != nil {
+		panic(fmt.Errorf("failed to unmarshal OLM deployments: %v", err))
+	}
+	var olm *appsv1.Deployment
+
+	for _, d := range dl {
+		if d.Name == olmDeploymentName {
+			olm = &d
+			olmNamespace = d.Namespace
+			break
+		}
+	}
+	if olm == nil {
+		panic(fmt.Errorf("deployment %q not found among OLM deployments", olmDeploymentName))
+	}
+
+	featureGatePattern := regexp.MustCompile(`--feature-gates=([[:alnum:]]+)=(true|false)`)
+	for _, c := range olm.Spec.Template.Spec.Containers {
+		if c.Name == "manager" {
+			for _, arg := range c.Args {
+				if matches := featureGatePattern.FindStringSubmatch(arg); matches != nil {
+					v, err := strconv.ParseBool(matches[2])
+					if err != nil {
+						panic(fmt.Errorf("failed to parse feature gate %q=%q: %v", matches[1], matches[2], err))
+					}
+					featureGates[featuregate.Feature(matches[1])] = v
+				}
+			}
+		}
+	}
+	logger.Info(fmt.Sprintf("Enabled feature gates: %v", featureGates))
+}
+
+func CheckFeatureTags(ctx context.Context, sc *godog.Scenario) (context.Context, error) {
+	for _, tag := range sc.Tags {
+		if enabled, found := featureGates[featuregate.Feature(tag.Name[1:])]; found && !enabled {
+			logger.Info(fmt.Sprintf("Skipping scenario %q because feature gate %q is disabled", sc.Name, tag.Name[1:]))
+			return ctx, godog.ErrSkip
+		}
+	}
+	return ctx, nil
+}
+
+func CreateScenarioContext(ctx context.Context, sc *godog.Scenario) (context.Context, error) {
+	scCtx := &scenarioContext{
+		id:                   sc.Id,
+		namespace:            fmt.Sprintf("ns-%s", sc.Id),
+		clusterExtensionName: fmt.Sprintf("ce-%s", sc.Id),
+	}
+	return context.WithValue(ctx, scenarioContextKey, scCtx), nil
+}
+
+func scenarioCtx(ctx context.Context) *scenarioContext {
+	return ctx.Value(scenarioContextKey).(*scenarioContext)
+}
+
+func stderrOutput(err error) string {
+	var exitErr *exec.ExitError
+	if errors.As(err, &exitErr) && exitErr != nil {
+		return string(exitErr.Stderr)
+	}
+	return ""
+}
+
+// ScenarioCleanup kills any background commands and, when the scenario passed, deletes the resources it created.
+func ScenarioCleanup(ctx context.Context, _ *godog.Scenario, err error) (context.Context, error) {
+	sc := scenarioCtx(ctx)
+	for _, bgCmd := range sc.backGroundCmds {
+		if p := bgCmd.Process; p != nil {
+			_ = p.Kill()
+		}
+	}
+	if err != nil {
+		return ctx, err
+	}
+
+	forDeletion := []resource{}
+	if sc.clusterExtensionName != "" {
+		forDeletion = append(forDeletion, resource{name: sc.clusterExtensionName, kind: "clusterextension"})
+	}
+	forDeletion = append(forDeletion, resource{name: sc.namespace, kind: "namespace"})
+	go func() {
+		for _, r := range forDeletion {
+			if _, err := k8sClient("delete", r.kind, r.name, "--ignore-not-found=true"); err != nil {
+				logger.Info("Error deleting resource", "name", r.name, "namespace", sc.namespace, "stderr", stderrOutput(err))
+			}
+		}
+	}()
+	return ctx, nil
+}
diff --git a/test/e2e/steps/steps.go b/test/e2e/steps/steps.go
new file mode 100644
index 000000000..e1a55934d
--- /dev/null
+++ b/test/e2e/steps/steps.go
@@ -0,0 +1,732 @@
+package steps
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/cucumber/godog"
+	jsonpatch "github.com/evanphx/json-patch"
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-containerregistry/pkg/crane"
+	"github.com/prometheus/common/expfmt"
+	"github.com/prometheus/common/model"
+	"github.com/spf13/pflag"
+	"github.com/stretchr/testify/require"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/utils/ptr"
+	"sigs.k8s.io/yaml"
+
+	ocv1 "github.com/operator-framework/operator-controller/api/v1"
+)
+
+const (
+	olmDeploymentName = "operator-controller-controller-manager"
+	timeout           = 5 * time.Minute
+	tick              = 1 * time.Second
+)
+
+var (
+	olmNamespace   = "olmv1-system"
+	kubeconfigPath string
+	k8sCli         string
+)
+
+// RegisterSteps maps the Gherkin step expressions onto their Go implementations.
+func RegisterSteps(sc *godog.ScenarioContext) {
+	sc.Step(`^OLM is available$`, OLMisAvailable)
+	sc.Step(`^(?i)bundle "([^"]+)" is installed in version "([^"]+)"$`, BundleInstalled)
+
+	sc.Step(`^(?i)ClusterExtension is applied(?:\s+.*)?$`, ResourceIsApplied)
+	sc.Step(`^(?i)ClusterExtension is updated to version "([^"]+)"$`, ClusterExtensionVersionUpdate)
+	sc.Step(`^(?i)ClusterExtension is updated(?:\s+.*)?$`, ResourceIsApplied)
+	sc.Step(`^(?i)ClusterExtension is available$`, ClusterExtensionIsAvailable)
+	sc.Step(`^(?i)ClusterExtension is rolled out$`, ClusterExtensionIsRolledOut)
+	sc.Step(`^(?i)ClusterExtension reports "([^"]+)" as active revisions?$`, ClusterExtensionReportsActiveRevisions)
+	sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+) and Message:$`, ClusterExtensionReportsCondition)
+	sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+)$`, ClusterExtensionReportsConditionWithoutMsg)
+	sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+)$`, ClusterExtensionReportsConditionWithoutReason)
+	sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+)$`, ClusterExtensionRevisionReportsConditionWithoutMsg)
+	sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" is archived$`, ClusterExtensionRevisionIsArchived)
+
+	sc.Step(`^(?i)resource "([^"]+)" is installed$`, ResourceAvailable)
+	sc.Step(`^(?i)resource "([^"]+)" is available$`, ResourceAvailable)
+	sc.Step(`^(?i)resource "([^"]+)" is removed$`, ResourceRemoved)
+	sc.Step(`^(?i)resource "([^"]+)" exists$`, ResourceAvailable)
+	sc.Step(`^(?i)resource is applied$`, ResourceIsApplied)
+	sc.Step(`^(?i)resource "deployment/test-operator" reports as (not ready|ready)$`, MarkTestOperatorNotReady)
+
+	sc.Step(`^(?i)resource apply fails with error msg containing "([^"]+)"$`, ResourceApplyFails)
+	sc.Step(`^(?i)resource "([^"]+)" is eventually restored$`, ResourceRestored)
+	sc.Step(`^(?i)resource "([^"]+)" matches$`, ResourceMatches)
+
+	sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in test namespace$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace)
+	sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in \${TEST_NAMESPACE}$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace)
+	sc.Step(`^(?i)ServiceAccount "([^"]*)" in test namespace is cluster admin$`, ServiceAccountWithClusterAdminPermissionsIsAvailableInNamespace)
+	sc.Step(`^(?i)ServiceAccount "([^"]+)" in test namespace has permissions to fetch "([^"]+)" metrics$`, ServiceAccountWithFetchMetricsPermissions)
+	sc.Step(`^(?i)ServiceAccount "([^"]+)" sends request to "([^"]+)" endpoint of "([^"]+)" service$`, SendMetricsRequest)
+
+	sc.Step(`^"([^"]+)" catalog is updated to version "([^"]+)"$`, CatalogIsUpdatedToVersion)
+	sc.Step(`^(?i)ClusterCatalog "([^"]+)" is updated to version "([^"]+)"$`, CatalogIsUpdatedToVersion)
+	sc.Step(`^"([^"]+)" catalog serves bundles$`, CatalogServesBundles)
+	sc.Step(`^(?i)ClusterCatalog "([^"]+)" serves bundles$`, CatalogServesBundles)
+	sc.Step(`^"([^"]+)" catalog image version "([^"]+)" is also tagged as "([^"]+)"$`, TagCatalogImage)
+	sc.Step(`^(?i)ClusterCatalog "([^"]+)" image version "([^"]+)" is also tagged as "([^"]+)"$`, TagCatalogImage)
+
+	sc.Step(`^(?i)operator "([^"]+)" target namespace is "([^"]+)"$`, OperatorTargetNamespace)
+	sc.Step(`^(?i)Prometheus metrics are returned in the response$`, PrometheusMetricsAreReturned)
+}
+
+func init() {
+	flagSet := pflag.CommandLine
+	flagSet.StringVar(&k8sCli, "k8s.cli", "kubectl", "Path to k8s cli")
+	if v, found := os.LookupEnv("KUBECONFIG"); found {
+		kubeconfigPath = v
+	} else {
+		home, err := os.UserHomeDir()
+		if err != nil {
+			panic(fmt.Sprintf("cannot determine user home directory: %v", err))
+		}
+		flagSet.StringVar(&kubeconfigPath, "kubeconfig", filepath.Join(home, ".kube", "config"), "Path to a kubeconfig. Only required if out-of-cluster.")
+	}
+}
+
+func k8sClient(args ...string) (string, error) {
+	cmd := exec.Command(k8sCli, args...)
+	logger.V(1).Info("Running", "command", strings.Join(cmd.Args, " "))
+	cmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath))
+	b, err := cmd.Output()
+	if err != nil {
+		logger.V(1).Info("Failed to run", "command", strings.Join(cmd.Args, " "), "stderr", stderrOutput(err), "error", err)
+	}
+	output := string(b)
+	logger.V(1).Info("Output", "command", strings.Join(cmd.Args, " "), "output", output)
+	return output, err
+}
+
+func k8scliWithInput(yaml string, args ...string) (string, error) {
+	cmd := exec.Command(k8sCli, args...)
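+	// Feed the manifest to the CLI on stdin so callers can run e.g. "apply -f -" against in-memory YAML.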
+ cmd.Stdin = bytes.NewBufferString(yaml) + cmd.Env = append(os.Environ(), fmt.Sprintf("KUBECONFIG=%s", kubeconfigPath)) + b, err := cmd.Output() + return string(b), err +} + +func OLMisAvailable(ctx context.Context) error { + require.Eventually(godog.T(ctx), func() bool { + v, err := k8sClient("get", "deployment", "-n", olmNamespace, olmDeploymentName, "-o", "jsonpath='{.status.conditions[?(@.type==\"Available\")].status}'") + if err != nil { + return false + } + return v == "'True'" + }, timeout, tick) + return nil +} + +func BundleInstalled(ctx context.Context, name, version string) error { + sc := scenarioCtx(ctx) + waitFor(ctx, func() bool { + v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.install.bundle}") + if err != nil { + return false + } + var bundle map[string]interface{} + if err := json.Unmarshal([]byte(v), &bundle); err != nil { + return false + } + return bundle["name"] == name && bundle["version"] == version + }) + return nil +} + +func toUnstructured(yamlContent string) (*unstructured.Unstructured, error) { + var u map[string]any + if err := yaml.Unmarshal([]byte(yamlContent), &u); err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: u}, nil +} + +func substituteScenarioVars(content string, sc *scenarioContext) string { + vars := map[string]string{ + "TEST_NAMESPACE": sc.namespace, + "NAME": sc.clusterExtensionName, + "CATALOG_IMG": "docker-registry.operator-controller-e2e.svc.cluster.local:5000/e2e/test-catalog:v1", + } + if v, found := os.LookupEnv("CATALOG_IMG"); found { + vars["CATALOG_IMG"] = v + } + m := func(k string) string { + if v, found := vars[k]; found { + return v + } + return "" + } + return os.Expand(content, m) +} + +func ResourceApplyFails(ctx context.Context, errMsg string, yamlTemplate *godog.DocString) error { + sc := scenarioCtx(ctx) + yamlContent := substituteScenarioVars(yamlTemplate.Content, sc) + _, err := toUnstructured(yamlContent) + if err != nil { + return fmt.Errorf("failed to parse resource yaml: %v", err) + } + waitFor(ctx, func() bool { + _, err := k8scliWithInput(yamlContent, "apply", "-f", "-") + if err == nil { + return false + } + if stdErr := stderrOutput(err); !strings.Contains(stdErr, errMsg) { + return false + } + return true + }) + return nil +} + +func ClusterExtensionVersionUpdate(ctx context.Context, version string) error { + sc := scenarioCtx(ctx) + patch := map[string]any{ + "spec": map[string]any{ + "source": map[string]any{ + "catalog": map[string]any{ + "version": version, + }, + }, + }, + } + pb, err := json.Marshal(patch) + if err != nil { + return err + } + _, err = k8sClient("patch", "clusterextension", sc.clusterExtensionName, "--type", "merge", "-p", string(pb)) + return err +} + +func ResourceIsApplied(ctx context.Context, yamlTemplate *godog.DocString) error { + sc := scenarioCtx(ctx) + yamlContent := substituteScenarioVars(yamlTemplate.Content, sc) + res, err := toUnstructured(yamlContent) + if err != nil { + return fmt.Errorf("failed to parse resource yaml: %v", err) + } + out, err := k8scliWithInput(yamlContent, "apply", "-f", "-") + if err != nil { + return fmt.Errorf("failed to apply resource %v %w", out, err) + } + if res.GetKind() == "ClusterExtension" { + sc.clusterExtensionName = res.GetName() + } + return nil +} + +func ClusterExtensionIsAvailable(ctx context.Context) error { + sc := scenarioCtx(ctx) + require.Eventually(godog.T(ctx), func() bool { + v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", 
"jsonpath={.status.conditions[?(@.type==\"Installed\")].status}") + if err != nil { + return false + } + return v == "True" + }, timeout, tick) + return nil +} + +func ClusterExtensionIsRolledOut(ctx context.Context) error { + sc := scenarioCtx(ctx) + require.Eventually(godog.T(ctx), func() bool { + v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.conditions[?(@.type==\"Progressing\")]}") + if err != nil { + return false + } + + var condition map[string]interface{} + if err := json.Unmarshal([]byte(v), &condition); err != nil { + return false + } + return condition["status"] == "True" && condition["reason"] == "Succeeded" && condition["type"] == "Progressing" + }, timeout, tick) + return nil +} + +func waitFor(ctx context.Context, conditionFn func() bool) { + require.Eventually(godog.T(ctx), conditionFn, timeout, tick) +} + +func waitForCondition(ctx context.Context, resourceType, resourceName, conditionType, conditionStatus string, conditionReason *string, msg *string) error { + require.Eventually(godog.T(ctx), func() bool { + v, err := k8sClient("get", resourceType, resourceName, "-o", fmt.Sprintf("jsonpath={.status.conditions[?(@.type==\"%s\")]}", conditionType)) + if err != nil { + return false + } + + var condition map[string]interface{} + if err := json.Unmarshal([]byte(v), &condition); err != nil { + return false + } + if condition["status"] != conditionStatus { + return false + } + if conditionReason != nil && condition["reason"] != *conditionReason { + return false + } + if msg != nil && condition["message"] != *msg { + return false + } + + return true + }, timeout, tick) + return nil +} + +func waitForExtensionCondition(ctx context.Context, conditionType, conditionStatus string, conditionReason *string, msg *string) error { + sc := scenarioCtx(ctx) + return waitForCondition(ctx, "clusterextension", sc.clusterExtensionName, conditionType, conditionStatus, conditionReason, msg) +} + +func ClusterExtensionReportsCondition(ctx context.Context, conditionType, conditionStatus, conditionReason string, msg *godog.DocString) error { + var conditionMsg *string + if msg != nil { + conditionMsg = ptr.To(substituteScenarioVars(strings.Join(strings.Fields(msg.Content), " "), scenarioCtx(ctx))) + } + return waitForExtensionCondition(ctx, conditionType, conditionStatus, &conditionReason, conditionMsg) +} + +func ClusterExtensionReportsConditionWithoutMsg(ctx context.Context, conditionType, conditionStatus, conditionReason string) error { + return ClusterExtensionReportsCondition(ctx, conditionType, conditionStatus, conditionReason, nil) +} + +func ClusterExtensionReportsConditionWithoutReason(ctx context.Context, conditionType, conditionStatus string) error { + return waitForExtensionCondition(ctx, conditionType, conditionStatus, nil, nil) +} + +func ClusterExtensionReportsActiveRevisions(ctx context.Context, rawRevisionNames string) error { + sc := scenarioCtx(ctx) + expectedRevisionNames := sets.New[string]() + for _, rev := range strings.Split(rawRevisionNames, ",") { + expectedRevisionNames.Insert(substituteScenarioVars(strings.TrimSpace(rev), sc)) + } + + waitFor(ctx, func() bool { + v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.activeRevisions}") + if err != nil { + return false + } + var activeRevisions []ocv1.RevisionStatus + if err := json.Unmarshal([]byte(v), &activeRevisions); err != nil { + return false + } + activeRevisionsNames := sets.New[string]() + for _, rev := range 
activeRevisions {
+			activeRevisionsNames.Insert(rev.Name)
+		}
+		return activeRevisionsNames.Equal(expectedRevisionNames)
+	})
+	return nil
+}
+
+func ClusterExtensionRevisionReportsConditionWithoutMsg(ctx context.Context, revisionName, conditionType, conditionStatus, conditionReason string) error {
+	return waitForCondition(ctx, "clusterextensionrevision", substituteScenarioVars(revisionName, scenarioCtx(ctx)), conditionType, conditionStatus, &conditionReason, nil)
+}
+
+func ClusterExtensionRevisionIsArchived(ctx context.Context, revisionName string) error {
+	return waitForCondition(ctx, "clusterextensionrevision", substituteScenarioVars(revisionName, scenarioCtx(ctx)), "Progressing", "False", ptr.To("Archived"), nil)
+}
+
+func ResourceAvailable(ctx context.Context, resource string) error {
+	sc := scenarioCtx(ctx)
+	resource = substituteScenarioVars(resource, sc)
+	rtype, name, found := strings.Cut(resource, "/")
+	if !found {
+		return fmt.Errorf("resource %s is not in the format <type>/<name>", resource)
+	}
+	waitFor(ctx, func() bool {
+		_, err := k8sClient("get", rtype, name, "-n", sc.namespace)
+		return err == nil
+	})
+	return nil
+}
+
+func ResourceRemoved(ctx context.Context, resource string) error {
+	sc := scenarioCtx(ctx)
+	rtype, name, found := strings.Cut(resource, "/")
+	if !found {
+		return fmt.Errorf("resource %s is not in the format <type>/<name>", resource)
+	}
+	yaml, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "yaml")
+	if err != nil {
+		return err
+	}
+	obj, err := toUnstructured(yaml)
+	if err != nil {
+		return err
+	}
+	sc.removedResources = append(sc.removedResources, *obj)
+	_, err = k8sClient("delete", rtype, name, "-n", sc.namespace)
+	return err
+}
+
+func ResourceMatches(ctx context.Context, resource string, requiredContentTemplate *godog.DocString) error {
+	sc := scenarioCtx(ctx)
+	resource = substituteScenarioVars(resource, sc)
+	rtype, name, found := strings.Cut(resource, "/")
+	if !found {
+		return fmt.Errorf("resource %s is not in the format <type>/<name>", resource)
+	}
+	requiredContent, err := toUnstructured(substituteScenarioVars(requiredContentTemplate.Content, sc))
+	if err != nil {
+		return fmt.Errorf("failed to parse required resource yaml: %v", err)
+	}
+	waitFor(ctx, func() bool {
+		objJson, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "json")
+		if err != nil {
+			return false
+		}
+		obj, err := toUnstructured(objJson)
+		if err != nil {
+			return false
+		}
+		patch, err := json.Marshal(requiredContent.Object)
+		if err != nil {
+			return false
+		}
+		// Merge the required content into the live object; if the merge changes nothing,
+		// the live object already contains the expected content.
+		updJson, err := jsonpatch.MergePatch([]byte(objJson), patch)
+		if err != nil {
+			return false
+		}
+		upd, err := toUnstructured(string(updJson))
+		if err != nil {
+			return false
+		}
+
+		return len(cmp.Diff(upd.Object, obj.Object)) == 0
+	})
+	return nil
+}
+
+func ResourceRestored(ctx context.Context, resource string) error {
+	sc := scenarioCtx(ctx)
+	rtype, name, found := strings.Cut(resource, "/")
+	if !found {
+		return fmt.Errorf("resource %s is not in the format <type>/<name>", resource)
+	}
+	waitFor(ctx, func() bool {
+		yaml, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "yaml")
+		if err != nil {
+			return false
+		}
+		obj, err := toUnstructured(yaml)
+		if err != nil {
+			return false
+		}
+		ct := obj.GetCreationTimestamp()
+
+		for i, removed := range sc.removedResources {
+			rct := removed.GetCreationTimestamp()
+			if removed.GetName() == obj.GetName() && removed.GetKind() == obj.GetKind() && rct.Before(&ct) {
+				switch rtype {
+				case "configmap":
+					if !reflect.DeepEqual(removed.Object["data"],
obj.Object["data"]) { + return false + } + default: + if !reflect.DeepEqual(removed.Object["spec"], obj.Object["spec"]) { + return false + } + } + sc.removedResources = append(sc.removedResources[:i], sc.removedResources[i+1:]...) + return true + } + } + return false + }) + return nil +} + +func applyPermissionsToServiceAccount(ctx context.Context, serviceAccount, rbacTemplate string, keyValue ...string) error { + sc := scenarioCtx(ctx) + yamlContent, err := os.ReadFile(filepath.Join("steps", "testdata", rbacTemplate)) + if err != nil { + return fmt.Errorf("failed to read RBAC template yaml: %v", err) + } + + vars := map[string]string{ + "TEST_NAMESPACE": sc.namespace, + "SERVICE_ACCOUNT_NAME": serviceAccount, + "SERVICEACCOUNT_NAME": serviceAccount, + "CLUSTER_EXTENSION_NAME": sc.clusterExtensionName, + "CLUSTEREXTENSION_NAME": sc.clusterExtensionName, + } + if len(keyValue) > 0 { + for i := 0; i < len(keyValue); i += 2 { + vars[keyValue[i]] = keyValue[i+1] + } + } + m := func(k string) string { + if v, found := vars[k]; found { + return v + } + return "" + } + + // Replace template variables + yaml := os.Expand(string(yamlContent), m) + + // Apply the RBAC configuration + _, err = k8scliWithInput(yaml, "apply", "-f", "-") + if err != nil { + return fmt.Errorf("failed to apply RBAC configuration: %v: %s", err, stderrOutput(err)) + } + + return nil +} + +func ServiceAccountWithNeededPermissionsIsAvailableInNamespace(ctx context.Context, serviceAccount string) error { + return applyPermissionsToServiceAccount(ctx, serviceAccount, "rbac-template.yaml") +} + +func ServiceAccountWithClusterAdminPermissionsIsAvailableInNamespace(ctx context.Context, serviceAccount string) error { + return applyPermissionsToServiceAccount(ctx, serviceAccount, "cluster-admin-rbac-template.yaml") +} + +func ServiceAccountWithFetchMetricsPermissions(ctx context.Context, serviceAccount string, controllerName string) error { + return applyPermissionsToServiceAccount(ctx, serviceAccount, "metrics-reader-rbac-template.yaml", "CONTROLLER_NAME", controllerName) +} + +func httpGet(url string, token string) (*http.Response, error) { + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // we don't care about the certificate + } + client := &http.Client{Transport: tr} + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Header.Set("Authorization", "Bearer "+token) + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + return resp, nil +} + +func randomAvailablePort() (int, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return 0, err + } + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil +} + +func SendMetricsRequest(ctx context.Context, serviceAccount string, endpoint string, controllerName string) error { + sc := scenarioCtx(ctx) + serviceNs, err := k8sClient("get", "service", "-A", "-o", fmt.Sprintf(`jsonpath={.items[?(@.metadata.name=="%s-service")].metadata.namespace}`, controllerName)) + if err != nil { + return err + } + v, err := k8sClient("get", "service", "-n", serviceNs, fmt.Sprintf("%s-service", controllerName), "-o", "json") + if err != nil { + return err + } + var service corev1.Service + if err := json.Unmarshal([]byte(v), &service); err != nil { + return err + } + podNameCmd := []string{"get", "pod", "-n", olmNamespace, "-o", "jsonpath={.items}"} + for k, v := range service.Spec.Selector { + podNameCmd = append(podNameCmd, fmt.Sprintf("--selector=%s=%s", k, v)) + } + v, 
err = k8sClient(podNameCmd...)
+	if err != nil {
+		return err
+	}
+
+	var pods []corev1.Pod
+	if err := json.Unmarshal([]byte(v), &pods); err != nil {
+		return err
+	}
+	token, err := k8sClient("create", "token", serviceAccount, "-n", sc.namespace)
+	if err != nil {
+		return err
+	}
+	// Trim surrounding whitespace (kubectl terminates its output with a newline) so the
+	// bearer token forms a valid Authorization header value.
+	token = strings.TrimSpace(token)
+	var metricsPort int32
+	for _, p := range service.Spec.Ports {
+		if p.Name == "metrics" {
+			metricsPort = p.Port
+			break
+		}
+	}
+	sc.metricsResponse = make(map[string]string)
+	for _, p := range pods {
+		port, err := randomAvailablePort()
+		if err != nil {
+			return err
+		}
+		portForwardCmd := exec.Command(k8sCli, "port-forward", "-n", p.Namespace, fmt.Sprintf("pod/%s", p.Name), fmt.Sprintf("%d:%d", port, metricsPort)) //nolint:gosec // perfectly safe to start port-forwarder for provided controller name
+		logger.V(1).Info("starting port-forward", "command", strings.Join(portForwardCmd.Args, " "))
+		if err := portForwardCmd.Start(); err != nil {
+			logger.Error(err, fmt.Sprintf("failed to start port-forward for pod %s", p.Name))
+			return err
+		}
+		waitFor(ctx, func() bool {
+			resp, err := httpGet(fmt.Sprintf("https://localhost:%d%s", port, endpoint), token)
+			if err != nil {
+				return false
+			}
+			defer resp.Body.Close()
+
+			if resp.StatusCode == http.StatusOK {
+				b, err := io.ReadAll(resp.Body)
+				if err != nil {
+					return false
+				}
+				sc.metricsResponse[p.Name] = string(b)
+				return true
+			}
+			b, err := io.ReadAll(resp.Body)
+			if err != nil {
+				return false
+			}
+			logger.V(1).Info("failed to get metrics", "pod", p.Name, "response", string(b))
+			return false
+		})
+		if err := portForwardCmd.Process.Kill(); err != nil {
+			return err
+		}
+		if _, err := portForwardCmd.Process.Wait(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func CatalogIsUpdatedToVersion(name, version string) error {
+	ref, err := k8sClient("get", "clustercatalog", fmt.Sprintf("%s-catalog", name), "-o", "jsonpath={.spec.source.image.ref}")
+	if err != nil {
+		return err
+	}
+	i := strings.LastIndexByte(ref, ':')
+	if i == -1 {
+		return fmt.Errorf("failed to find tag in image reference %s", ref)
+	}
+	base := ref[:i]
+	patch := map[string]any{
+		"spec": map[string]any{
+			"source": map[string]any{
+				"image": map[string]any{
+					"ref": fmt.Sprintf("%s:%s", base, version),
+				},
+			},
+		},
+	}
+	pb, err := json.Marshal(patch)
+	if err != nil {
+		return err
+	}
+	_, err = k8sClient("patch", "clustercatalog", fmt.Sprintf("%s-catalog", name), "--type", "merge", "-p", string(pb))
+	return err
+}
+
+func CatalogServesBundles(ctx context.Context, catalogName string) error {
+	yamlContent, err := os.ReadFile(filepath.Join("steps", "testdata", fmt.Sprintf("%s-catalog-template.yaml", catalogName)))
+	if err != nil {
+		return fmt.Errorf("failed to read catalog yaml: %v", err)
+	}
+
+	_, err = k8scliWithInput(substituteScenarioVars(string(yamlContent), scenarioCtx(ctx)), "apply", "-f", "-")
+	if err != nil {
+		return fmt.Errorf("failed to apply catalog: %v", err)
+	}
+
+	return nil
+}
+
+func TagCatalogImage(name, oldTag, newTag string) error {
+	imageRef := fmt.Sprintf("%s/%s", os.Getenv("LOCAL_REGISTRY_HOST"), fmt.Sprintf("e2e/%s-catalog:%s", name, oldTag))
+	return crane.Tag(imageRef, newTag, crane.Insecure)
+}
+
+func PrometheusMetricsAreReturned(ctx context.Context) error {
+	sc := scenarioCtx(ctx)
+	for podName, mr := range sc.metricsResponse {
+		if mr == "" {
+			return fmt.Errorf("metrics response is empty for pod %s", podName)
+		}
+		parser := expfmt.NewTextParser(model.UTF8Validation)
+		metricsFamilies, err :=
parser.TextToMetricFamilies(strings.NewReader(mr)) + if err != nil { + return fmt.Errorf("failed to parse metrics response for pod %s: %v", podName, err) + } + if len(metricsFamilies) == 0 { + return fmt.Errorf("metrics response does not contain any metrics for pod %s", podName) + } + } + return nil +} + +func OperatorTargetNamespace(ctx context.Context, operator, namespace string) error { + sc := scenarioCtx(ctx) + namespace = substituteScenarioVars(namespace, sc) + raw, err := k8sClient("get", "deployment", "-n", sc.namespace, operator, "-o", "json") + if err != nil { + return err + } + d := &appsv1.Deployment{} + if err := json.Unmarshal([]byte(raw), d); err != nil { + return err + } + + if tns := d.Spec.Template.Annotations["olm.targetNamespaces"]; tns != namespace { + return fmt.Errorf("expected target namespace %s, got %s", namespace, tns) + } + return nil +} + +func MarkTestOperatorNotReady(ctx context.Context, state string) error { + sc := scenarioCtx(ctx) + v, err := k8sClient("get", "deployment", "-n", sc.namespace, "test-operator", "-o", "jsonpath={.spec.selector.matchLabels}") + if err != nil { + return err + } + var labels map[string]string + if err := json.Unmarshal([]byte(v), &labels); err != nil { + return err + } + podNameCmd := []string{"get", "pod", "-n", sc.namespace, "-o", "jsonpath={.items[0].metadata.name}"} + for k, v := range labels { + podNameCmd = append(podNameCmd, fmt.Sprintf("--selector=%s=%s", k, v)) + } + podName, err := k8sClient(podNameCmd...) + if err != nil { + return err + } + var op string + switch state { + case "not ready": + op = "rm" + case "ready": + op = "touch" + default: + return fmt.Errorf("invalid state %s", state) + } + _, err = k8sClient("exec", podName, "-n", sc.namespace, "--", op, "/var/www/ready") + return err +} diff --git a/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml b/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml new file mode 100644 index 000000000..c020c7ca5 --- /dev/null +++ b/test/e2e/steps/testdata/cluster-admin-rbac-template.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ${TEST_NAMESPACE} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-cluster-admin-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} diff --git a/test/e2e/steps/testdata/extra-catalog-template.yaml b/test/e2e/steps/testdata/extra-catalog-template.yaml new file mode 100644 index 000000000..a43d9b324 --- /dev/null +++ b/test/e2e/steps/testdata/extra-catalog-template.yaml @@ -0,0 +1,11 @@ +apiVersion: olm.operatorframework.io/v1 +kind: ClusterCatalog +metadata: + name: extra-catalog +spec: + priority: 0 + source: + type: Image + image: + pollIntervalMinutes: 1 + ref: ${CATALOG_IMG} diff --git a/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml b/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml new file mode 100644 index 000000000..4001f8681 --- /dev/null +++ b/test/e2e/steps/testdata/metrics-reader-rbac-template.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ${TEST_NAMESPACE} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${CONTROLLER_NAME}-metrics-reader-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ${CONTROLLER_NAME}-metrics-reader +subjects: + - kind: ServiceAccount + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} diff --git a/test/e2e/steps/testdata/rbac-template.yaml b/test/e2e/steps/testdata/rbac-template.yaml new file mode 100644 index 000000000..d975d7698 --- /dev/null +++ b/test/e2e/steps/testdata/rbac-template.yaml @@ -0,0 +1,77 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ${TEST_NAMESPACE} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-olm-admin-clusterrole +rules: + - apiGroups: [olm.operatorframework.io] + resources: [clusterextensions, clusterextensions/finalizers] + resourceNames: ["${CLUSTEREXTENSION_NAME}"] + verbs: [update] + # Allow ClusterExtensionRevisions to set blockOwnerDeletion ownerReferences + - apiGroups: [olm.operatorframework.io] + resources: [clusterextensionrevisions, clusterextensionrevisions/finalizers] + verbs: [update, create, list, watch, get, delete, patch] + + - apiGroups: [apiextensions.k8s.io] + resources: [customresourcedefinitions] + verbs: [update, create, list, watch, get, delete, patch] + - apiGroups: [""] + resources: + - configmaps + - secrets + - services + - serviceaccounts + - events + - namespaces + verbs: [update, create, list, watch, get, delete, patch] + - apiGroups: ["apps"] + resources: + - deployments + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["rbac.authorization.k8s.io"] + resources: + - clusterroles + - roles + - clusterrolebindings + - rolebindings + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: [ update, create, list, watch, get, delete, patch ] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: [create] + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: [create] + + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-install-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ${TEST_NAMESPACE}-${SERVICEACCOUNT_NAME}-olm-admin-clusterrole +subjects: + - kind: ServiceAccount + name: ${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} diff --git a/test/e2e/steps/testdata/test-catalog-template.yaml b/test/e2e/steps/testdata/test-catalog-template.yaml new file mode 100644 index 000000000..7e46872f3 --- /dev/null +++ b/test/e2e/steps/testdata/test-catalog-template.yaml @@ -0,0 +1,11 @@ +apiVersion: olm.operatorframework.io/v1 +kind: ClusterCatalog +metadata: + name: test-catalog +spec: + priority: 0 + source: + type: Image + image: + pollIntervalMinutes: 1 + ref: ${CATALOG_IMG} diff --git a/test/e2e/webhook_support_test.go b/test/e2e/webhook_support_test.go deleted file mode 100644 index 9fd05184a..000000000 --- a/test/e2e/webhook_support_test.go +++ /dev/null @@ -1,237 +0,0 @@ -package e2e - -import ( - "context" - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/dynamic" - "k8s.io/utils/ptr" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" - testutil "github.com/operator-framework/operator-controller/internal/shared/util/test" - . "github.com/operator-framework/operator-controller/test/helpers" -) - -var dynamicClient dynamic.Interface - -func TestWebhookSupport(t *testing.T) { - SkipIfFeatureGateDisabled(t, "WebhookProviderCertManager") - t.Log("Test support for bundles with webhooks") - defer testutil.CollectTestArtifacts(t, artifactName, c, cfg) - - if dynamicClient == nil { - var err error - dynamicClient, err = dynamic.NewForConfig(cfg) - require.NoError(t, err) - } - - t.Log("By creating install namespace, and necessary rbac resources") - namespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator", - }, - } - require.NoError(t, c.Create(t.Context(), &namespace)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &namespace)) - }) - - serviceAccount := corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-installer", - Namespace: namespace.GetName(), - }, - } - require.NoError(t, c.Create(t.Context(), &serviceAccount)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), &serviceAccount)) - }) - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-installer", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - APIGroup: corev1.GroupName, - Name: serviceAccount.GetName(), - Namespace: serviceAccount.GetNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: rbacv1.GroupName, - Kind: "ClusterRole", - Name: "cluster-admin", - }, - } - require.NoError(t, c.Create(t.Context(), clusterRoleBinding)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterRoleBinding)) - }) - - t.Log("By creating the webhook-operator ClusterCatalog") - extensionCatalog := &ocv1.ClusterCatalog{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-catalog", - }, - Spec: ocv1.ClusterCatalogSpec{ - Source: ocv1.CatalogSource{ - Type: ocv1.SourceTypeImage, - Image: &ocv1.ImageSource{ - Ref: fmt.Sprintf("%s/e2e/test-catalog:v1", os.Getenv("CLUSTER_REGISTRY_HOST")), - PollIntervalMinutes: ptr.To(1), - }, - }, - }, - } - require.NoError(t, c.Create(t.Context(), extensionCatalog)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), extensionCatalog)) - }) - - t.Log("By waiting for the catalog to serve its metadata") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(context.Background(), types.NamespacedName{Name: extensionCatalog.GetName()}, extensionCatalog)) - cond := apimeta.FindStatusCondition(extensionCatalog.Status.Conditions, ocv1.TypeServing) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonAvailable, cond.Reason) - }, pollDuration, pollInterval) - - t.Log("By installing the webhook-operator ClusterExtension") - clusterExtension := &ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{ - Name: "webhook-operator-extension", - }, - Spec: 
ocv1.ClusterExtensionSpec{ - Source: ocv1.SourceConfig{ - SourceType: "Catalog", - Catalog: &ocv1.CatalogFilter{ - PackageName: "webhook-operator", - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"olm.operatorframework.io/metadata.name": extensionCatalog.Name}, - }, - }, - }, - Namespace: namespace.GetName(), - ServiceAccount: ocv1.ServiceAccountReference{ - Name: serviceAccount.GetName(), - }, - }, - } - require.NoError(t, c.Create(t.Context(), clusterExtension)) - t.Cleanup(func() { - require.NoError(t, c.Delete(context.Background(), clusterExtension)) - }) - - t.Log("By waiting for webhook-operator extension to be installed successfully") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Name: clusterExtension.Name}, clusterExtension)) - cond := apimeta.FindStatusCondition(clusterExtension.Status.Conditions, ocv1.TypeInstalled) - require.NotNil(ct, cond) - require.Equal(ct, metav1.ConditionTrue, cond.Status) - require.Equal(ct, ocv1.ReasonSucceeded, cond.Reason) - require.Contains(ct, cond.Message, "Installed bundle") - require.NotNil(ct, clusterExtension.Status.Install) - require.NotEmpty(ct, clusterExtension.Status.Install.Bundle) - }, pollDuration, pollInterval) - - t.Log("By waiting for webhook-operator deployment to be available") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - deployment := &appsv1.Deployment{} - require.NoError(ct, c.Get(t.Context(), types.NamespacedName{Namespace: namespace.GetName(), Name: "webhook-operator-controller-manager"}, deployment)) - available := false - for _, cond := range deployment.Status.Conditions { - if cond.Type == appsv1.DeploymentAvailable { - available = cond.Status == corev1.ConditionTrue - } - } - require.True(ct, available) - }, pollDuration, pollInterval) - - v1Gvr := schema.GroupVersionResource{ - Group: "webhook.operators.coreos.io", - Version: "v1", - Resource: "webhooktests", - } - v1Client := dynamicClient.Resource(v1Gvr).Namespace(namespace.GetName()) - - t.Log("By eventually seeing that invalid CR creation is rejected by the validating webhook") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - obj := getWebhookOperatorResource("invalid-test-cr", namespace.GetName(), false) - _, err := v1Client.Create(t.Context(), obj, metav1.CreateOptions{}) - require.Error(ct, err) - require.Contains(ct, err.Error(), "Invalid value: false: Spec.Valid must be true") - }, pollDuration, pollInterval) - - var ( - res *unstructured.Unstructured - err error - obj = getWebhookOperatorResource("valid-test-cr", namespace.GetName(), true) - ) - - t.Log("By eventually creating a valid CR") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - res, err = v1Client.Create(t.Context(), obj, metav1.CreateOptions{}) - require.NoError(ct, err) - }, pollDuration, pollInterval) - t.Cleanup(func() { - require.NoError(t, v1Client.Delete(context.Background(), obj.GetName(), metav1.DeleteOptions{})) - }) - - require.Equal(t, map[string]interface{}{ - "valid": true, - "mutate": true, - }, res.Object["spec"]) - - t.Log("By checking a valid CR is converted to v2 by the conversion webhook") - v2Gvr := schema.GroupVersionResource{ - Group: "webhook.operators.coreos.io", - Version: "v2", - Resource: "webhooktests", - } - v2Client := dynamicClient.Resource(v2Gvr).Namespace(namespace.GetName()) - - t.Log("By eventually getting the valid CR with a v2 client") - require.EventuallyWithT(t, func(ct *assert.CollectT) { - res, err = v2Client.Get(t.Context(), 
obj.GetName(), metav1.GetOptions{}) - require.NoError(ct, err) - }, pollDuration, pollInterval) - - t.Log("and verifying that the CR is correctly converted") - require.Equal(t, map[string]interface{}{ - "conversion": map[string]interface{}{ - "valid": true, - "mutate": true, - }, - }, res.Object["spec"]) -} - -func getWebhookOperatorResource(name string, namespace string, valid bool) *unstructured.Unstructured { - return &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "webhook.operators.coreos.io/v1", - "kind": "webhooktests", - "metadata": map[string]interface{}{ - "name": name, - "namespace": namespace, - }, - "spec": map[string]interface{}{ - "valid": valid, - }, - }, - } -} diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel index 37d8adc95..f5bda3bb1 100644 --- a/vendor/cel.dev/expr/BUILD.bazel +++ b/vendor/cel.dev/expr/BUILD.bazel @@ -16,7 +16,6 @@ go_library( importpath = "cel.dev/expr", visibility = ["//visibility:public"], deps = [ - "@org_golang_google_genproto_googleapis_rpc//status:go_default_library", "@org_golang_google_protobuf//reflect/protoreflect", "@org_golang_google_protobuf//runtime/protoimpl", "@org_golang_google_protobuf//types/known/anypb", diff --git a/vendor/cel.dev/expr/MODULE.bazel b/vendor/cel.dev/expr/MODULE.bazel index 85ac9ff61..cb98ed599 100644 --- a/vendor/cel.dev/expr/MODULE.bazel +++ b/vendor/cel.dev/expr/MODULE.bazel @@ -11,26 +11,9 @@ bazel_dep( version = "0.39.1", repo_name = "bazel_gazelle", ) -bazel_dep( - name = "googleapis", - version = "0.0.0-20241220-5e258e33.bcr.1", - repo_name = "com_google_googleapis", -) -bazel_dep( - name = "googleapis-cc", - version = "1.0.0", -) -bazel_dep( - name = "googleapis-java", - version = "1.0.0", -) -bazel_dep( - name = "googleapis-go", - version = "1.0.0", -) bazel_dep( name = "protobuf", - version = "27.0", + version = "27.1", repo_name = "com_google_protobuf", ) bazel_dep( @@ -63,12 +46,11 @@ python.toolchain( ) go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk") -go_sdk.download(version = "1.22.0") +go_sdk.download(version = "1.23.0") go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps") go_deps.from_file(go_mod = "//:go.mod") use_repo( go_deps, - "org_golang_google_genproto_googleapis_rpc", "org_golang_google_protobuf", ) diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go index bb225c8ab..b18085e9b 100644 --- a/vendor/cel.dev/expr/checked.pb.go +++ b/vendor/cel.dev/expr/checked.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.10 +// protoc v5.27.1 // source: cel/expr/checked.proto package expr @@ -13,6 +13,7 @@ import ( structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -136,24 +137,21 @@ func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) { } type CheckedExpr struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` + Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` unknownFields protoimpl.UnknownFields - - ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` - ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` - Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CheckedExpr) Reset() { *x = CheckedExpr{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CheckedExpr) String() string { @@ -164,7 +162,7 @@ func (*CheckedExpr) ProtoMessage() {} func (x *CheckedExpr) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -215,11 +213,8 @@ func (x *CheckedExpr) GetExpr() *Expr { } type Type struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to TypeKind: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to TypeKind: // // *Type_Dyn // *Type_Null @@ -234,16 +229,16 @@ type Type struct { // *Type_Type // *Type_Error // *Type_AbstractType_ - TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` + TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Type) Reset() { *x = Type{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[1] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Type) String() string { @@ -254,7 +249,7 @@ func (*Type) ProtoMessage() {} func (x *Type) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -269,100 +264,126 @@ func (*Type) Descriptor() ([]byte, []int) { return file_cel_expr_checked_proto_rawDescGZIP(), []int{1} } -func (m *Type) GetTypeKind() isType_TypeKind { - if m != nil { - return m.TypeKind +func (x *Type) GetTypeKind() isType_TypeKind { + if x != nil { + return x.TypeKind } return nil } func (x *Type) GetDyn() *emptypb.Empty { - if x, ok := x.GetTypeKind().(*Type_Dyn); ok { - return x.Dyn + if x != nil { + if x, ok := x.TypeKind.(*Type_Dyn); ok { + return x.Dyn + } } return nil } func (x *Type) GetNull() structpb.NullValue { - if x, ok := x.GetTypeKind().(*Type_Null); ok { - return x.Null + if x != nil { + if x, ok := x.TypeKind.(*Type_Null); ok { + return x.Null + } } return structpb.NullValue(0) } func (x *Type) GetPrimitive() Type_PrimitiveType { - if x, ok := x.GetTypeKind().(*Type_Primitive); ok { - return x.Primitive + if x != nil { + if x, ok := x.TypeKind.(*Type_Primitive); ok { + return x.Primitive + } } return Type_PRIMITIVE_TYPE_UNSPECIFIED } func (x *Type) GetWrapper() Type_PrimitiveType { - if x, ok := x.GetTypeKind().(*Type_Wrapper); ok { - return x.Wrapper + if x != nil { + if x, ok := x.TypeKind.(*Type_Wrapper); ok { + return x.Wrapper + } } return Type_PRIMITIVE_TYPE_UNSPECIFIED } func (x *Type) GetWellKnown() Type_WellKnownType { - if x, ok := x.GetTypeKind().(*Type_WellKnown); ok { - return x.WellKnown + if x != nil { + if x, ok := x.TypeKind.(*Type_WellKnown); ok { + return x.WellKnown + } } return Type_WELL_KNOWN_TYPE_UNSPECIFIED } func (x *Type) GetListType() *Type_ListType { - if x, ok := x.GetTypeKind().(*Type_ListType_); ok { - return x.ListType + if x != nil { + if x, ok := x.TypeKind.(*Type_ListType_); ok { + return x.ListType + } } return nil } func (x *Type) GetMapType() *Type_MapType { - if x, ok := x.GetTypeKind().(*Type_MapType_); ok { - return x.MapType + if x != nil { + if x, ok := x.TypeKind.(*Type_MapType_); ok { + return x.MapType + } } return nil } func (x *Type) GetFunction() *Type_FunctionType { - if x, ok := x.GetTypeKind().(*Type_Function); ok { - return x.Function + if x != nil { + if x, ok := x.TypeKind.(*Type_Function); ok { + return x.Function + } } return nil } func (x *Type) GetMessageType() string { - if x, ok := x.GetTypeKind().(*Type_MessageType); ok { - return x.MessageType + if x != nil { + if x, ok := x.TypeKind.(*Type_MessageType); ok { + return x.MessageType + } } return "" } func (x *Type) GetTypeParam() string { - if x, ok := x.GetTypeKind().(*Type_TypeParam); ok { - return x.TypeParam + if x != nil { + if x, ok := x.TypeKind.(*Type_TypeParam); ok { + return x.TypeParam + } } return "" } func (x *Type) GetType() *Type { - if x, ok := x.GetTypeKind().(*Type_Type); ok { - return x.Type + if x != nil { + if x, ok := x.TypeKind.(*Type_Type); ok { + return x.Type + } } return nil } func (x *Type) GetError() *emptypb.Empty { - if x, ok := x.GetTypeKind().(*Type_Error); ok { - return x.Error + if x != nil { + if x, ok := x.TypeKind.(*Type_Error); ok { + 
return x.Error + } } return nil } func (x *Type) GetAbstractType() *Type_AbstractType { - if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok { - return x.AbstractType + if x != nil { + if x, ok := x.TypeKind.(*Type_AbstractType_); ok { + return x.AbstractType + } } return nil } @@ -450,25 +471,22 @@ func (*Type_Error) isType_TypeKind() {} func (*Type_AbstractType_) isType_TypeKind() {} type Decl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Types that are assignable to DeclKind: + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Types that are valid to be assigned to DeclKind: // // *Decl_Ident // *Decl_Function - DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` + DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Decl) Reset() { *x = Decl{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Decl) String() string { @@ -479,7 +497,7 @@ func (*Decl) ProtoMessage() {} func (x *Decl) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -501,23 +519,27 @@ func (x *Decl) GetName() string { return "" } -func (m *Decl) GetDeclKind() isDecl_DeclKind { - if m != nil { - return m.DeclKind +func (x *Decl) GetDeclKind() isDecl_DeclKind { + if x != nil { + return x.DeclKind } return nil } func (x *Decl) GetIdent() *Decl_IdentDecl { - if x, ok := x.GetDeclKind().(*Decl_Ident); ok { - return x.Ident + if x != nil { + if x, ok := x.DeclKind.(*Decl_Ident); ok { + return x.Ident + } } return nil } func (x *Decl) GetFunction() *Decl_FunctionDecl { - if x, ok := x.GetDeclKind().(*Decl_Function); ok { - return x.Function + if x != nil { + if x, ok := x.DeclKind.(*Decl_Function); ok { + return x.Function + } } return nil } @@ -539,22 +561,19 @@ func (*Decl_Ident) isDecl_DeclKind() {} func (*Decl_Function) isDecl_DeclKind() {} type Reference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` - Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Reference) Reset() { *x = Reference{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_cel_expr_checked_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Reference) String() string { @@ -565,7 +584,7 @@ func (*Reference) ProtoMessage() {} func (x *Reference) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -602,20 +621,17 @@ func (x *Reference) GetValue() *Constant { } type Type_ListType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` unknownFields protoimpl.UnknownFields - - ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Type_ListType) Reset() { *x = Type_ListType{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Type_ListType) String() string { @@ -626,7 +642,7 @@ func (*Type_ListType) ProtoMessage() {} func (x *Type_ListType) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -649,21 +665,18 @@ func (x *Type_ListType) GetElemType() *Type { } type Type_MapType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` unknownFields protoimpl.UnknownFields - - KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` - ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Type_MapType) Reset() { *x = Type_MapType{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Type_MapType) String() string { @@ -674,7 +687,7 @@ func (*Type_MapType) ProtoMessage() {} func (x *Type_MapType) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -704,21 +717,18 @@ func (x *Type_MapType) GetValueType() *Type { } type Type_FunctionType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + ArgTypes []*Type 
`protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` unknownFields protoimpl.UnknownFields - - ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` - ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Type_FunctionType) Reset() { *x = Type_FunctionType{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Type_FunctionType) String() string { @@ -729,7 +739,7 @@ func (*Type_FunctionType) ProtoMessage() {} func (x *Type_FunctionType) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -759,21 +769,18 @@ func (x *Type_FunctionType) GetArgTypes() []*Type { } type Type_AbstractType struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Type_AbstractType) Reset() { *x = Type_AbstractType{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Type_AbstractType) String() string { @@ -784,7 +791,7 @@ func (*Type_AbstractType) ProtoMessage() {} func (x *Type_AbstractType) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -814,22 +821,19 @@ func (x *Type_AbstractType) GetParameterTypes() []*Type { } type Decl_IdentDecl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` unknownFields protoimpl.UnknownFields - - Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Decl_IdentDecl) Reset() { *x = Decl_IdentDecl{} - if protoimpl.UnsafeEnabled { - mi := 
&file_cel_expr_checked_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Decl_IdentDecl) String() string { @@ -840,7 +844,7 @@ func (*Decl_IdentDecl) ProtoMessage() {} func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -877,20 +881,18 @@ func (x *Decl_IdentDecl) GetDoc() string { } type Decl_FunctionDecl struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` + Doc string `protobuf:"bytes,2,opt,name=doc,proto3" json:"doc,omitempty"` unknownFields protoimpl.UnknownFields - - Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Decl_FunctionDecl) Reset() { *x = Decl_FunctionDecl{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Decl_FunctionDecl) String() string { @@ -901,7 +903,7 @@ func (*Decl_FunctionDecl) ProtoMessage() {} func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -923,26 +925,30 @@ func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload { return nil } -type Decl_FunctionDecl_Overload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *Decl_FunctionDecl) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} - OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` - Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` - TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` - ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` - IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` - Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` +type Decl_FunctionDecl_Overload struct { + state protoimpl.MessageState `protogen:"open.v1"` + OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"` + TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"` + ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + IsInstanceFunction 
bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"` + Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Decl_FunctionDecl_Overload) Reset() { *x = Decl_FunctionDecl_Overload{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_checked_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_checked_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Decl_FunctionDecl_Overload) String() string { @@ -953,7 +959,7 @@ func (*Decl_FunctionDecl_Overload) ProtoMessage() {} func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_checked_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1012,185 +1018,113 @@ func (x *Decl_FunctionDecl_Overload) GetDoc() string { var File_cel_expr_checked_proto protoreflect.FileDescriptor -var file_cel_expr_checked_proto_rawDesc = []byte{ - 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, - 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, - 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, - 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, - 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, - 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65, - 0x78, 0x70, 0x72, 0x18, 
0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a, - 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, - 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d, - 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, - 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69, - 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, - 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, - 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, - 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c, - 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, - 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 
0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08, - 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66, - 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, - 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72, - 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72, - 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, - 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b, - 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73, - 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, - 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59, - 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, - 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, - 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12, - 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a, - 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44, - 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c, - 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, - 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f, - 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, - 
0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65, - 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x6c, - 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, - 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a, - 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, - 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75, - 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, - 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, - 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, - 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64, - 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63, - 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, - 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} +const file_cel_expr_checked_proto_rawDesc = "" + + "\n" + + "\x16cel/expr/checked.proto\x12\bcel.expr\x1a\x15cel/expr/syntax.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xba\x03\n" + + "\vCheckedExpr\x12L\n" + + "\rreference_map\x18\x02 \x03(\v2'.cel.expr.CheckedExpr.ReferenceMapEntryR\freferenceMap\x12=\n" + + "\btype_map\x18\x03 \x03(\v2\".cel.expr.CheckedExpr.TypeMapEntryR\atypeMap\x125\n" + + "\vsource_info\x18\x05 \x01(\v2\x14.cel.expr.SourceInfoR\n" + + "sourceInfo\x12!\n" + + "\fexpr_version\x18\x06 \x01(\tR\vexprVersion\x12\"\n" + + "\x04expr\x18\x04 \x01(\v2\x0e.cel.expr.ExprR\x04expr\x1aT\n" + + "\x11ReferenceMapEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12)\n" + + "\x05value\x18\x02 
\x01(\v2\x13.cel.expr.ReferenceR\x05value:\x028\x01\x1aJ\n" + + "\fTypeMapEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12$\n" + + "\x05value\x18\x02 \x01(\v2\x0e.cel.expr.TypeR\x05value:\x028\x01\"\xe6\t\n" + + "\x04Type\x12*\n" + + "\x03dyn\x18\x01 \x01(\v2\x16.google.protobuf.EmptyH\x00R\x03dyn\x120\n" + + "\x04null\x18\x02 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\x04null\x12<\n" + + "\tprimitive\x18\x03 \x01(\x0e2\x1c.cel.expr.Type.PrimitiveTypeH\x00R\tprimitive\x128\n" + + "\awrapper\x18\x04 \x01(\x0e2\x1c.cel.expr.Type.PrimitiveTypeH\x00R\awrapper\x12=\n" + + "\n" + + "well_known\x18\x05 \x01(\x0e2\x1c.cel.expr.Type.WellKnownTypeH\x00R\twellKnown\x126\n" + + "\tlist_type\x18\x06 \x01(\v2\x17.cel.expr.Type.ListTypeH\x00R\blistType\x123\n" + + "\bmap_type\x18\a \x01(\v2\x16.cel.expr.Type.MapTypeH\x00R\amapType\x129\n" + + "\bfunction\x18\b \x01(\v2\x1b.cel.expr.Type.FunctionTypeH\x00R\bfunction\x12#\n" + + "\fmessage_type\x18\t \x01(\tH\x00R\vmessageType\x12\x1f\n" + + "\n" + + "type_param\x18\n" + + " \x01(\tH\x00R\ttypeParam\x12$\n" + + "\x04type\x18\v \x01(\v2\x0e.cel.expr.TypeH\x00R\x04type\x12.\n" + + "\x05error\x18\f \x01(\v2\x16.google.protobuf.EmptyH\x00R\x05error\x12B\n" + + "\rabstract_type\x18\x0e \x01(\v2\x1b.cel.expr.Type.AbstractTypeH\x00R\fabstractType\x1a7\n" + + "\bListType\x12+\n" + + "\telem_type\x18\x01 \x01(\v2\x0e.cel.expr.TypeR\belemType\x1ac\n" + + "\aMapType\x12)\n" + + "\bkey_type\x18\x01 \x01(\v2\x0e.cel.expr.TypeR\akeyType\x12-\n" + + "\n" + + "value_type\x18\x02 \x01(\v2\x0e.cel.expr.TypeR\tvalueType\x1al\n" + + "\fFunctionType\x12/\n" + + "\vresult_type\x18\x01 \x01(\v2\x0e.cel.expr.TypeR\n" + + "resultType\x12+\n" + + "\targ_types\x18\x02 \x03(\v2\x0e.cel.expr.TypeR\bargTypes\x1a[\n" + + "\fAbstractType\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + + "\x0fparameter_types\x18\x02 \x03(\v2\x0e.cel.expr.TypeR\x0eparameterTypes\"s\n" + + "\rPrimitiveType\x12\x1e\n" + + "\x1aPRIMITIVE_TYPE_UNSPECIFIED\x10\x00\x12\b\n" + + "\x04BOOL\x10\x01\x12\t\n" + + "\x05INT64\x10\x02\x12\n" + + "\n" + + "\x06UINT64\x10\x03\x12\n" + + "\n" + + "\x06DOUBLE\x10\x04\x12\n" + + "\n" + + "\x06STRING\x10\x05\x12\t\n" + + "\x05BYTES\x10\x06\"V\n" + + "\rWellKnownType\x12\x1f\n" + + "\x1bWELL_KNOWN_TYPE_UNSPECIFIED\x10\x00\x12\a\n" + + "\x03ANY\x10\x01\x12\r\n" + + "\tTIMESTAMP\x10\x02\x12\f\n" + + "\bDURATION\x10\x03B\v\n" + + "\ttype_kind\"\xd4\x04\n" + + "\x04Decl\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x120\n" + + "\x05ident\x18\x02 \x01(\v2\x18.cel.expr.Decl.IdentDeclH\x00R\x05ident\x129\n" + + "\bfunction\x18\x03 \x01(\v2\x1b.cel.expr.Decl.FunctionDeclH\x00R\bfunction\x1ak\n" + + "\tIdentDecl\x12\"\n" + + "\x04type\x18\x01 \x01(\v2\x0e.cel.expr.TypeR\x04type\x12(\n" + + "\x05value\x18\x02 \x01(\v2\x12.cel.expr.ConstantR\x05value\x12\x10\n" + + "\x03doc\x18\x03 \x01(\tR\x03doc\x1a\xd0\x02\n" + + "\fFunctionDecl\x12B\n" + + "\toverloads\x18\x01 \x03(\v2$.cel.expr.Decl.FunctionDecl.OverloadR\toverloads\x12\x10\n" + + "\x03doc\x18\x02 \x01(\tR\x03doc\x1a\xe9\x01\n" + + "\bOverload\x12\x1f\n" + + "\voverload_id\x18\x01 \x01(\tR\n" + + "overloadId\x12&\n" + + "\x06params\x18\x02 \x03(\v2\x0e.cel.expr.TypeR\x06params\x12\x1f\n" + + "\vtype_params\x18\x03 \x03(\tR\n" + + "typeParams\x12/\n" + + "\vresult_type\x18\x04 \x01(\v2\x0e.cel.expr.TypeR\n" + + "resultType\x120\n" + + "\x14is_instance_function\x18\x05 \x01(\bR\x12isInstanceFunction\x12\x10\n" + + "\x03doc\x18\x06 \x01(\tR\x03docB\v\n" + + "\tdecl_kind\"j\n" + + "\tReference\x12\x12\n" + 
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x1f\n" + + "\voverload_id\x18\x03 \x03(\tR\n" + + "overloadId\x12(\n" + + "\x05value\x18\x04 \x01(\v2\x12.cel.expr.ConstantR\x05valueB,\n" + + "\fdev.cel.exprB\tDeclProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3" var ( file_cel_expr_checked_proto_rawDescOnce sync.Once - file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc + file_cel_expr_checked_proto_rawDescData []byte ) func file_cel_expr_checked_proto_rawDescGZIP() []byte { file_cel_expr_checked_proto_rawDescOnce.Do(func() { - file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData) + file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_checked_proto_rawDesc), len(file_cel_expr_checked_proto_rawDesc))) }) return file_cel_expr_checked_proto_rawDescData } var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_cel_expr_checked_proto_goTypes = []interface{}{ +var file_cel_expr_checked_proto_goTypes = []any{ (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr @@ -1257,141 +1191,7 @@ func file_cel_expr_checked_proto_init() { return } file_cel_expr_syntax_proto_init() - if !protoimpl.UnsafeEnabled { - file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckedExpr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Reference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_ListType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_MapType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_FunctionType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Type_AbstractType); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl_IdentDecl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl_FunctionDecl); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Decl_FunctionDecl_Overload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []any{ (*Type_Dyn)(nil), (*Type_Null)(nil), (*Type_Primitive)(nil), @@ -1406,7 +1206,7 @@ func file_cel_expr_checked_proto_init() { (*Type_Error)(nil), (*Type_AbstractType_)(nil), } - file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []any{ (*Decl_Ident)(nil), (*Decl_Function)(nil), } @@ -1414,7 +1214,7 @@ func file_cel_expr_checked_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_cel_expr_checked_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_checked_proto_rawDesc), len(file_cel_expr_checked_proto_rawDesc)), NumEnums: 2, NumMessages: 13, NumExtensions: 0, @@ -1426,7 +1226,6 @@ func file_cel_expr_checked_proto_init() { MessageInfos: file_cel_expr_checked_proto_msgTypes, }.Build() File_cel_expr_checked_proto = out.File - file_cel_expr_checked_proto_rawDesc = nil file_cel_expr_checked_proto_goTypes = nil file_cel_expr_checked_proto_depIdxs = nil } diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go index a7aae0900..83acb8935 100644 --- a/vendor/cel.dev/expr/eval.pb.go +++ b/vendor/cel.dev/expr/eval.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.3 +// protoc-gen-go v1.36.10 // protoc v5.27.1 // source: cel/expr/eval.proto @@ -12,6 +12,7 @@ import ( anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -373,58 +374,39 @@ func (x *EvalState_Result) GetValue() int64 { var File_cel_expr_eval_proto protoreflect.FileDescriptor -var file_cel_expr_eval_proto_rawDesc = []byte{ - 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a, - 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, - 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0xa2, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, - 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, - 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, - 0x78, 0x70, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, - 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, - 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, - 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x28, - 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 
0x67, 0x65, - 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, - 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c, 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, - 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_cel_expr_eval_proto_rawDesc = "" + + "\n" + + "\x13cel/expr/eval.proto\x12\bcel.expr\x1a\x19google/protobuf/any.proto\x1a\x14cel/expr/value.proto\"\xa2\x01\n" + + "\tEvalState\x12+\n" + + "\x06values\x18\x01 \x03(\v2\x13.cel.expr.ExprValueR\x06values\x124\n" + + "\aresults\x18\x03 \x03(\v2\x1a.cel.expr.EvalState.ResultR\aresults\x1a2\n" + + "\x06Result\x12\x12\n" + + "\x04expr\x18\x01 \x01(\x03R\x04expr\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value\"\x9a\x01\n" + + "\tExprValue\x12'\n" + + "\x05value\x18\x01 \x01(\v2\x0f.cel.expr.ValueH\x00R\x05value\x12*\n" + + "\x05error\x18\x02 \x01(\v2\x12.cel.expr.ErrorSetH\x00R\x05error\x120\n" + + "\aunknown\x18\x03 \x01(\v2\x14.cel.expr.UnknownSetH\x00R\aunknownB\x06\n" + + "\x04kind\"4\n" + + "\bErrorSet\x12(\n" + + "\x06errors\x18\x01 \x03(\v2\x10.cel.expr.StatusR\x06errors\"f\n" + + "\x06Status\x12\x12\n" + + "\x04code\x18\x01 \x01(\x05R\x04code\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\x12.\n" + + "\adetails\x18\x03 \x03(\v2\x14.google.protobuf.AnyR\adetails\"\"\n" + + "\n" + + "UnknownSet\x12\x14\n" + + "\x05exprs\x18\x01 \x03(\x03R\x05exprsB,\n" + + "\fdev.cel.exprB\tEvalProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3" var ( file_cel_expr_eval_proto_rawDescOnce sync.Once - file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc + file_cel_expr_eval_proto_rawDescData []byte ) func file_cel_expr_eval_proto_rawDescGZIP() []byte { file_cel_expr_eval_proto_rawDescOnce.Do(func() { - file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData) + file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_eval_proto_rawDesc), len(file_cel_expr_eval_proto_rawDesc))) }) return file_cel_expr_eval_proto_rawDescData } @@ -470,7 +452,7 @@ func file_cel_expr_eval_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_cel_expr_eval_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_eval_proto_rawDesc), len(file_cel_expr_eval_proto_rawDesc)), NumEnums: 0, NumMessages: 6, NumExtensions: 0, @@ -481,7 +463,6 @@ func file_cel_expr_eval_proto_init() { MessageInfos: file_cel_expr_eval_proto_msgTypes, }.Build() File_cel_expr_eval_proto = out.File - file_cel_expr_eval_proto_rawDesc = nil file_cel_expr_eval_proto_goTypes = nil file_cel_expr_eval_proto_depIdxs = nil } diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go index 79fd5443b..423993397 100644 --- a/vendor/cel.dev/expr/explain.pb.go +++ b/vendor/cel.dev/expr/explain.pb.go @@ -1,7 +1,7 @@ // 
Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.10 +// protoc v5.27.1 // source: cel/expr/explain.proto package expr @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -20,23 +21,20 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in cel/expr/explain.proto. type Explain struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"` unknownFields protoimpl.UnknownFields - - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Explain) Reset() { *x = Explain{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_explain_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_explain_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Explain) String() string { @@ -47,7 +45,7 @@ func (*Explain) ProtoMessage() {} func (x *Explain) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_explain_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -77,21 +75,18 @@ func (x *Explain) GetExprSteps() []*Explain_ExprStep { } type Explain_ExprStep struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Explain_ExprStep) Reset() { *x = Explain_ExprStep{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_explain_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_explain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Explain_ExprStep) String() string { @@ -102,7 +97,7 @@ func (*Explain_ExprStep) ProtoMessage() {} func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_explain_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -133,42 +128,33 @@ func (x *Explain_ExprStep) GetValueIndex() int32 { var File_cel_expr_explain_proto protoreflect.FileDescriptor -var file_cel_expr_explain_proto_rawDesc = []byte{ - 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61, 
- 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70, - 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a, - 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, - 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65, - 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72, - 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61, - 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, - 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} +const file_cel_expr_explain_proto_rawDesc = "" + + "\n" + + "\x16cel/expr/explain.proto\x12\bcel.expr\x1a\x14cel/expr/value.proto\"\xae\x01\n" + + "\aExplain\x12'\n" + + "\x06values\x18\x01 \x03(\v2\x0f.cel.expr.ValueR\x06values\x129\n" + + "\n" + + "expr_steps\x18\x02 \x03(\v2\x1a.cel.expr.Explain.ExprStepR\texprSteps\x1a;\n" + + "\bExprStep\x12\x0e\n" + + "\x02id\x18\x01 \x01(\x03R\x02id\x12\x1f\n" + + "\vvalue_index\x18\x02 \x01(\x05R\n" + + "valueIndex:\x02\x18\x01B/\n" + + "\fdev.cel.exprB\fExplainProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3" var ( file_cel_expr_explain_proto_rawDescOnce sync.Once - file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc + file_cel_expr_explain_proto_rawDescData []byte ) func file_cel_expr_explain_proto_rawDescGZIP() []byte { file_cel_expr_explain_proto_rawDescOnce.Do(func() { - file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData) + file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_explain_proto_rawDesc), len(file_cel_expr_explain_proto_rawDesc))) }) return file_cel_expr_explain_proto_rawDescData } var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_cel_expr_explain_proto_goTypes = []interface{}{ +var file_cel_expr_explain_proto_goTypes = []any{ (*Explain)(nil), // 0: cel.expr.Explain (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep (*Value)(nil), // 2: cel.expr.Value @@ -189,37 +175,11 @@ func file_cel_expr_explain_proto_init() { return } file_cel_expr_value_proto_init() - if !protoimpl.UnsafeEnabled { - file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Explain); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Explain_ExprStep); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_cel_expr_explain_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_explain_proto_rawDesc), len(file_cel_expr_explain_proto_rawDesc)), NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -230,7 +190,6 @@ func file_cel_expr_explain_proto_init() { MessageInfos: file_cel_expr_explain_proto_msgTypes, }.Build() File_cel_expr_explain_proto = out.File - file_cel_expr_explain_proto_rawDesc = nil file_cel_expr_explain_proto_goTypes = nil file_cel_expr_explain_proto_depIdxs = nil } diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go index 48a952872..72d19b20d 100644 --- a/vendor/cel.dev/expr/syntax.pb.go +++ b/vendor/cel.dev/expr/syntax.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.10 +// protoc v5.27.1 // source: cel/expr/syntax.proto package expr @@ -14,6 +14,7 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -76,21 +77,18 @@ func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) { } type ParsedExpr struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` + SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` unknownFields protoimpl.UnknownFields - - Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"` - SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ParsedExpr) Reset() { *x = ParsedExpr{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParsedExpr) String() string { @@ -101,7 +99,7 @@ func (*ParsedExpr) ProtoMessage() {} func (x *ParsedExpr) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -131,12 +129,9 @@ func (x *ParsedExpr) GetSourceInfo() *SourceInfo { } type Expr struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` - // Types that are assignable to ExprKind: + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // Types that are valid to be assigned to ExprKind: // // *Expr_ConstExpr // *Expr_IdentExpr @@ -145,16 +140,16 @@ type Expr struct { // *Expr_ListExpr // *Expr_StructExpr // 
*Expr_ComprehensionExpr - ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` + ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Expr) Reset() { *x = Expr{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr) String() string { @@ -165,7 +160,7 @@ func (*Expr) ProtoMessage() {} func (x *Expr) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -187,58 +182,72 @@ func (x *Expr) GetId() int64 { return 0 } -func (m *Expr) GetExprKind() isExpr_ExprKind { - if m != nil { - return m.ExprKind +func (x *Expr) GetExprKind() isExpr_ExprKind { + if x != nil { + return x.ExprKind } return nil } func (x *Expr) GetConstExpr() *Constant { - if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok { - return x.ConstExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_ConstExpr); ok { + return x.ConstExpr + } } return nil } func (x *Expr) GetIdentExpr() *Expr_Ident { - if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok { - return x.IdentExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_IdentExpr); ok { + return x.IdentExpr + } } return nil } func (x *Expr) GetSelectExpr() *Expr_Select { - if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok { - return x.SelectExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_SelectExpr); ok { + return x.SelectExpr + } } return nil } func (x *Expr) GetCallExpr() *Expr_Call { - if x, ok := x.GetExprKind().(*Expr_CallExpr); ok { - return x.CallExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_CallExpr); ok { + return x.CallExpr + } } return nil } func (x *Expr) GetListExpr() *Expr_CreateList { - if x, ok := x.GetExprKind().(*Expr_ListExpr); ok { - return x.ListExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_ListExpr); ok { + return x.ListExpr + } } return nil } func (x *Expr) GetStructExpr() *Expr_CreateStruct { - if x, ok := x.GetExprKind().(*Expr_StructExpr); ok { - return x.StructExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_StructExpr); ok { + return x.StructExpr + } } return nil } func (x *Expr) GetComprehensionExpr() *Expr_Comprehension { - if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok { - return x.ComprehensionExpr + if x != nil { + if x, ok := x.ExprKind.(*Expr_ComprehensionExpr); ok { + return x.ComprehensionExpr + } } return nil } @@ -290,11 +299,8 @@ func (*Expr_StructExpr) isExpr_ExprKind() {} func (*Expr_ComprehensionExpr) isExpr_ExprKind() {} type Constant struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to ConstantKind: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to ConstantKind: // // *Constant_NullValue // *Constant_BoolValue @@ -305,16 +311,16 @@ type Constant struct { // *Constant_BytesValue // *Constant_DurationValue // *Constant_TimestampValue - ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` + ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"` + unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache } func (x *Constant) Reset() { *x = Constant{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Constant) String() string { @@ -325,7 +331,7 @@ func (*Constant) ProtoMessage() {} func (x *Constant) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -340,74 +346,92 @@ func (*Constant) Descriptor() ([]byte, []int) { return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2} } -func (m *Constant) GetConstantKind() isConstant_ConstantKind { - if m != nil { - return m.ConstantKind +func (x *Constant) GetConstantKind() isConstant_ConstantKind { + if x != nil { + return x.ConstantKind } return nil } func (x *Constant) GetNullValue() structpb.NullValue { - if x, ok := x.GetConstantKind().(*Constant_NullValue); ok { - return x.NullValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_NullValue); ok { + return x.NullValue + } } return structpb.NullValue(0) } func (x *Constant) GetBoolValue() bool { - if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok { - return x.BoolValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_BoolValue); ok { + return x.BoolValue + } } return false } func (x *Constant) GetInt64Value() int64 { - if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok { - return x.Int64Value + if x != nil { + if x, ok := x.ConstantKind.(*Constant_Int64Value); ok { + return x.Int64Value + } } return 0 } func (x *Constant) GetUint64Value() uint64 { - if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok { - return x.Uint64Value + if x != nil { + if x, ok := x.ConstantKind.(*Constant_Uint64Value); ok { + return x.Uint64Value + } } return 0 } func (x *Constant) GetDoubleValue() float64 { - if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok { - return x.DoubleValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_DoubleValue); ok { + return x.DoubleValue + } } return 0 } func (x *Constant) GetStringValue() string { - if x, ok := x.GetConstantKind().(*Constant_StringValue); ok { - return x.StringValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_StringValue); ok { + return x.StringValue + } } return "" } func (x *Constant) GetBytesValue() []byte { - if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok { - return x.BytesValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_BytesValue); ok { + return x.BytesValue + } } return nil } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in cel/expr/syntax.proto. func (x *Constant) GetDurationValue() *durationpb.Duration { - if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok { - return x.DurationValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_DurationValue); ok { + return x.DurationValue + } } return nil } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in cel/expr/syntax.proto. 
func (x *Constant) GetTimestampValue() *timestamppb.Timestamp { - if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok { - return x.TimestampValue + if x != nil { + if x, ok := x.ConstantKind.(*Constant_TimestampValue); ok { + return x.TimestampValue + } } return nil } @@ -445,12 +469,12 @@ type Constant_BytesValue struct { } type Constant_DurationValue struct { - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in cel/expr/syntax.proto. DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"` } type Constant_TimestampValue struct { - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in cel/expr/syntax.proto. TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"` } @@ -473,25 +497,22 @@ func (*Constant_DurationValue) isConstant_ConstantKind() {} func (*Constant_TimestampValue) isConstant_ConstantKind() {} type SourceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"` Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"` - Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceInfo) Reset() { *x = SourceInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceInfo) String() string { @@ -502,7 +523,7 @@ func (*SourceInfo) ProtoMessage() {} func (x *SourceInfo) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -560,20 +581,17 @@ func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension { } type Expr_Ident struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` unknownFields protoimpl.UnknownFields - - Name string 
`protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Expr_Ident) Reset() { *x = Expr_Ident{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_Ident) String() string { @@ -584,7 +602,7 @@ func (*Expr_Ident) ProtoMessage() {} func (x *Expr_Ident) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -607,22 +625,19 @@ func (x *Expr_Ident) GetName() string { } type Expr_Select struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` + Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` + TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` unknownFields protoimpl.UnknownFields - - Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` - Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` - TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Expr_Select) Reset() { *x = Expr_Select{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_Select) String() string { @@ -633,7 +648,7 @@ func (*Expr_Select) ProtoMessage() {} func (x *Expr_Select) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -670,22 +685,19 @@ func (x *Expr_Select) GetTestOnly() bool { } type Expr_Call struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` + Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` unknownFields protoimpl.UnknownFields - - Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` - Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"` - Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Expr_Call) Reset() { *x = Expr_Call{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_Call) String() string { @@ -696,7 +708,7 @@ 
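// Illustrative sketch, not generated code: how a caller might use the nil-safe
// Expr accessors from this file. The helper name is hypothetical, and the
// function assumes it sits in the same package as the generated types.
func exampleCalledFunction(e *Expr) string {
	// GetCallExpr returns nil both for a nil *Expr and for a non-call kind,
	// so a single nil check covers both cases.
	if call := e.GetCallExpr(); call != nil {
		return call.GetFunction()
	}
	return ""
}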
func (*Expr_Call) ProtoMessage() {} func (x *Expr_Call) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -733,21 +745,18 @@ func (x *Expr_Call) GetArgs() []*Expr { } type Expr_CreateList struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` - OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` + OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Expr_CreateList) Reset() { *x = Expr_CreateList{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_CreateList) String() string { @@ -758,7 +767,7 @@ func (*Expr_CreateList) ProtoMessage() {} func (x *Expr_CreateList) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -788,21 +797,18 @@ func (x *Expr_CreateList) GetOptionalIndices() []int32 { } type Expr_CreateStruct struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` + Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` unknownFields protoimpl.UnknownFields - - MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` - Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Expr_CreateStruct) Reset() { *x = Expr_CreateStruct{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_CreateStruct) String() string { @@ -813,7 +819,7 @@ func (*Expr_CreateStruct) ProtoMessage() {} func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -843,26 +849,24 @@ func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry { } type Expr_Comprehension struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state 
protoimpl.MessageState `protogen:"open.v1"` + IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` + IterVar2 string `protobuf:"bytes,8,opt,name=iter_var2,json=iterVar2,proto3" json:"iter_var2,omitempty"` + IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` + AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` + AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` + LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` + LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` + Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` unknownFields protoimpl.UnknownFields - - IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"` - IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"` - AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` - AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` - LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` - LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` - Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Expr_Comprehension) Reset() { *x = Expr_Comprehension{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_Comprehension) String() string { @@ -873,7 +877,7 @@ func (*Expr_Comprehension) ProtoMessage() {} func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -895,6 +899,13 @@ func (x *Expr_Comprehension) GetIterVar() string { return "" } +func (x *Expr_Comprehension) GetIterVar2() string { + if x != nil { + return x.IterVar2 + } + return "" +} + func (x *Expr_Comprehension) GetIterRange() *Expr { if x != nil { return x.IterRange @@ -938,27 +949,24 @@ func (x *Expr_Comprehension) GetResult() *Expr { } type Expr_CreateStruct_Entry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - // Types that are assignable to KeyKind: + state protoimpl.MessageState `protogen:"open.v1"` + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Types that are valid to be assigned to KeyKind: // // *Expr_CreateStruct_Entry_FieldKey // *Expr_CreateStruct_Entry_MapKey KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"` Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" 
json:"optional_entry,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Expr_CreateStruct_Entry) Reset() { *x = Expr_CreateStruct_Entry{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Expr_CreateStruct_Entry) String() string { @@ -969,7 +977,7 @@ func (*Expr_CreateStruct_Entry) ProtoMessage() {} func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -991,23 +999,27 @@ func (x *Expr_CreateStruct_Entry) GetId() int64 { return 0 } -func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { - if m != nil { - return m.KeyKind +func (x *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { + if x != nil { + return x.KeyKind } return nil } func (x *Expr_CreateStruct_Entry) GetFieldKey() string { - if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok { - return x.FieldKey + if x != nil { + if x, ok := x.KeyKind.(*Expr_CreateStruct_Entry_FieldKey); ok { + return x.FieldKey + } } return "" } func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr { - if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok { - return x.MapKey + if x != nil { + if x, ok := x.KeyKind.(*Expr_CreateStruct_Entry_MapKey); ok { + return x.MapKey + } } return nil } @@ -1043,22 +1055,19 @@ func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {} func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {} type SourceInfo_Extension struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"` Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceInfo_Extension) Reset() { *x = SourceInfo_Extension{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceInfo_Extension) String() string { @@ -1069,7 +1078,7 @@ func (*SourceInfo_Extension) ProtoMessage() {} func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1106,21 +1115,18 @@ func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version { } type SourceInfo_Extension_Version struct { - state 
protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` unknownFields protoimpl.UnknownFields - - Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` - Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SourceInfo_Extension_Version) Reset() { *x = SourceInfo_Extension_Version{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_syntax_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_syntax_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceInfo_Extension_Version) String() string { @@ -1131,7 +1137,7 @@ func (*SourceInfo_Extension_Version) ProtoMessage() {} func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_syntax_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1162,210 +1168,124 @@ func (x *SourceInfo_Extension_Version) GetMinor() int64 { var File_cel_expr_syntax_proto protoreflect.FileDescriptor -var file_cel_expr_syntax_proto_rawDesc = []byte{ - 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, - 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22, - 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, - 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, - 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, - 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78, - 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, - 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 
0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, - 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, - 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c, - 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09, - 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69, - 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, - 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, - 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, - 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07, - 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09, - 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c, - 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, - 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, - 0x78, 
0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, - 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab, - 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, - 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, - 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, - 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, - 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70, - 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, - 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a, - 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, - 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65, - 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, - 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, - 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, - 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, - 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, - 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, - 0x12, 0x35, 0x0a, 0x0e, 0x6c, 
0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, - 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, - 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, - 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, - 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, - 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, - 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, - 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, - 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, - 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 
0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, - 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06, - 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, - 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61, - 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 
0x74, 0x52, 0x12, - 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, - 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43, - 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50, - 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, - 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, - 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, - 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, - 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c, - 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79, - 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, - 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} +const file_cel_expr_syntax_proto_rawDesc = "" + + "\n" + + "\x15cel/expr/syntax.proto\x12\bcel.expr\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"g\n" + + "\n" + + "ParsedExpr\x12\"\n" + + "\x04expr\x18\x02 \x01(\v2\x0e.cel.expr.ExprR\x04expr\x125\n" + + "\vsource_info\x18\x03 \x01(\v2\x14.cel.expr.SourceInfoR\n" + + "sourceInfo\"\x9a\v\n" + + "\x04Expr\x12\x0e\n" + + "\x02id\x18\x02 \x01(\x03R\x02id\x123\n" + + "\n" + + "const_expr\x18\x03 \x01(\v2\x12.cel.expr.ConstantH\x00R\tconstExpr\x125\n" + + "\n" + + "ident_expr\x18\x04 \x01(\v2\x14.cel.expr.Expr.IdentH\x00R\tidentExpr\x128\n" + + "\vselect_expr\x18\x05 \x01(\v2\x15.cel.expr.Expr.SelectH\x00R\n" + + "selectExpr\x122\n" + + "\tcall_expr\x18\x06 \x01(\v2\x13.cel.expr.Expr.CallH\x00R\bcallExpr\x128\n" + + "\tlist_expr\x18\a \x01(\v2\x19.cel.expr.Expr.CreateListH\x00R\blistExpr\x12>\n" + + "\vstruct_expr\x18\b \x01(\v2\x1b.cel.expr.Expr.CreateStructH\x00R\n" + + "structExpr\x12M\n" + + "\x12comprehension_expr\x18\t \x01(\v2\x1c.cel.expr.Expr.ComprehensionH\x00R\x11comprehensionExpr\x1a\x1b\n" + + "\x05Ident\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x1ae\n" + + "\x06Select\x12(\n" + + "\aoperand\x18\x01 \x01(\v2\x0e.cel.expr.ExprR\aoperand\x12\x14\n" + + "\x05field\x18\x02 \x01(\tR\x05field\x12\x1b\n" + + "\ttest_only\x18\x03 \x01(\bR\btestOnly\x1an\n" + + "\x04Call\x12&\n" + + "\x06target\x18\x01 \x01(\v2\x0e.cel.expr.ExprR\x06target\x12\x1a\n" + + "\bfunction\x18\x02 \x01(\tR\bfunction\x12\"\n" + + "\x04args\x18\x03 \x03(\v2\x0e.cel.expr.ExprR\x04args\x1ac\n" + + "\n" + + 
"CreateList\x12*\n" + + "\belements\x18\x01 \x03(\v2\x0e.cel.expr.ExprR\belements\x12)\n" + + "\x10optional_indices\x18\x02 \x03(\x05R\x0foptionalIndices\x1a\xab\x02\n" + + "\fCreateStruct\x12!\n" + + "\fmessage_name\x18\x01 \x01(\tR\vmessageName\x12;\n" + + "\aentries\x18\x02 \x03(\v2!.cel.expr.Expr.CreateStruct.EntryR\aentries\x1a\xba\x01\n" + + "\x05Entry\x12\x0e\n" + + "\x02id\x18\x01 \x01(\x03R\x02id\x12\x1d\n" + + "\tfield_key\x18\x02 \x01(\tH\x00R\bfieldKey\x12)\n" + + "\amap_key\x18\x03 \x01(\v2\x0e.cel.expr.ExprH\x00R\x06mapKey\x12$\n" + + "\x05value\x18\x04 \x01(\v2\x0e.cel.expr.ExprR\x05value\x12%\n" + + "\x0eoptional_entry\x18\x05 \x01(\bR\roptionalEntryB\n" + + "\n" + + "\bkey_kind\x1a\xca\x02\n" + + "\rComprehension\x12\x19\n" + + "\biter_var\x18\x01 \x01(\tR\aiterVar\x12\x1b\n" + + "\titer_var2\x18\b \x01(\tR\biterVar2\x12-\n" + + "\n" + + "iter_range\x18\x02 \x01(\v2\x0e.cel.expr.ExprR\titerRange\x12\x19\n" + + "\baccu_var\x18\x03 \x01(\tR\aaccuVar\x12+\n" + + "\taccu_init\x18\x04 \x01(\v2\x0e.cel.expr.ExprR\baccuInit\x125\n" + + "\x0eloop_condition\x18\x05 \x01(\v2\x0e.cel.expr.ExprR\rloopCondition\x12+\n" + + "\tloop_step\x18\x06 \x01(\v2\x0e.cel.expr.ExprR\bloopStep\x12&\n" + + "\x06result\x18\a \x01(\v2\x0e.cel.expr.ExprR\x06resultB\v\n" + + "\texpr_kind\"\xc1\x03\n" + + "\bConstant\x12;\n" + + "\n" + + "null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12\x1f\n" + + "\n" + + "bool_value\x18\x02 \x01(\bH\x00R\tboolValue\x12!\n" + + "\vint64_value\x18\x03 \x01(\x03H\x00R\n" + + "int64Value\x12#\n" + + "\fuint64_value\x18\x04 \x01(\x04H\x00R\vuint64Value\x12#\n" + + "\fdouble_value\x18\x05 \x01(\x01H\x00R\vdoubleValue\x12#\n" + + "\fstring_value\x18\x06 \x01(\tH\x00R\vstringValue\x12!\n" + + "\vbytes_value\x18\a \x01(\fH\x00R\n" + + "bytesValue\x12F\n" + + "\x0eduration_value\x18\b \x01(\v2\x19.google.protobuf.DurationB\x02\x18\x01H\x00R\rdurationValue\x12I\n" + + "\x0ftimestamp_value\x18\t \x01(\v2\x1a.google.protobuf.TimestampB\x02\x18\x01H\x00R\x0etimestampValueB\x0f\n" + + "\rconstant_kind\"\xac\x06\n" + + "\n" + + "SourceInfo\x12%\n" + + "\x0esyntax_version\x18\x01 \x01(\tR\rsyntaxVersion\x12\x1a\n" + + "\blocation\x18\x02 \x01(\tR\blocation\x12!\n" + + "\fline_offsets\x18\x03 \x03(\x05R\vlineOffsets\x12A\n" + + "\tpositions\x18\x04 \x03(\v2#.cel.expr.SourceInfo.PositionsEntryR\tpositions\x12E\n" + + "\vmacro_calls\x18\x05 \x03(\v2$.cel.expr.SourceInfo.MacroCallsEntryR\n" + + "macroCalls\x12>\n" + + "\n" + + "extensions\x18\x06 \x03(\v2\x1e.cel.expr.SourceInfo.ExtensionR\n" + + "extensions\x1a<\n" + + "\x0ePositionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x05R\x05value:\x028\x01\x1aM\n" + + "\x0fMacroCallsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12$\n" + + "\x05value\x18\x02 \x01(\v2\x0e.cel.expr.ExprR\x05value:\x028\x01\x1a\xe0\x02\n" + + "\tExtension\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12Y\n" + + "\x13affected_components\x18\x02 \x03(\x0e2(.cel.expr.SourceInfo.Extension.ComponentR\x12affectedComponents\x12@\n" + + "\aversion\x18\x03 \x01(\v2&.cel.expr.SourceInfo.Extension.VersionR\aversion\x1a5\n" + + "\aVersion\x12\x14\n" + + "\x05major\x18\x01 \x01(\x03R\x05major\x12\x14\n" + + "\x05minor\x18\x02 \x01(\x03R\x05minor\"o\n" + + "\tComponent\x12\x19\n" + + "\x15COMPONENT_UNSPECIFIED\x10\x00\x12\x14\n" + + "\x10COMPONENT_PARSER\x10\x01\x12\x1a\n" + + "\x16COMPONENT_TYPE_CHECKER\x10\x02\x12\x15\n" + + "\x11COMPONENT_RUNTIME\x10\x03B.\n" + + 
"\fdev.cel.exprB\vSyntaxProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3" var ( file_cel_expr_syntax_proto_rawDescOnce sync.Once - file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc + file_cel_expr_syntax_proto_rawDescData []byte ) func file_cel_expr_syntax_proto_rawDescGZIP() []byte { file_cel_expr_syntax_proto_rawDescOnce.Do(func() { - file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData) + file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_syntax_proto_rawDesc), len(file_cel_expr_syntax_proto_rawDesc))) }) return file_cel_expr_syntax_proto_rawDescData } var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_cel_expr_syntax_proto_goTypes = []interface{}{ +var file_cel_expr_syntax_proto_goTypes = []any{ (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr (*Expr)(nil), // 2: cel.expr.Expr @@ -1429,165 +1349,7 @@ func file_cel_expr_syntax_proto_init() { if File_cel_expr_syntax_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParsedExpr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Constant); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Ident); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Select); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Call); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_CreateList); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_CreateStruct); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_Comprehension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Expr_CreateStruct_Entry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceInfo_Extension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceInfo_Extension_Version); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []any{ (*Expr_ConstExpr)(nil), (*Expr_IdentExpr)(nil), (*Expr_SelectExpr)(nil), @@ -1596,7 +1358,7 @@ func file_cel_expr_syntax_proto_init() { (*Expr_StructExpr)(nil), (*Expr_ComprehensionExpr)(nil), } - file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []any{ (*Constant_NullValue)(nil), (*Constant_BoolValue)(nil), (*Constant_Int64Value)(nil), @@ -1607,7 +1369,7 @@ func file_cel_expr_syntax_proto_init() { (*Constant_DurationValue)(nil), (*Constant_TimestampValue)(nil), } - file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{ + file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []any{ (*Expr_CreateStruct_Entry_FieldKey)(nil), (*Expr_CreateStruct_Entry_MapKey)(nil), } @@ -1615,7 +1377,7 @@ func file_cel_expr_syntax_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_cel_expr_syntax_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_syntax_proto_rawDesc), len(file_cel_expr_syntax_proto_rawDesc)), NumEnums: 1, NumMessages: 15, NumExtensions: 0, @@ -1627,7 +1389,6 @@ func file_cel_expr_syntax_proto_init() { MessageInfos: file_cel_expr_syntax_proto_msgTypes, }.Build() File_cel_expr_syntax_proto = out.File - file_cel_expr_syntax_proto_rawDesc = nil file_cel_expr_syntax_proto_goTypes = nil file_cel_expr_syntax_proto_depIdxs = nil } diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go index e5e29228c..1f53a6a29 100644 --- a/vendor/cel.dev/expr/value.pb.go +++ b/vendor/cel.dev/expr/value.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.1 -// protoc v3.21.5 +// protoc-gen-go v1.36.10 +// protoc v5.27.1 // source: cel/expr/value.proto package expr @@ -13,6 +13,7 @@ import ( structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -23,11 +24,8 @@ const ( ) type Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Kind: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Kind: // // *Value_NullValue // *Value_BoolValue @@ -41,16 +39,16 @@ type Value struct { // *Value_MapValue // *Value_ListValue // *Value_TypeValue - Kind isValue_Kind `protobuf_oneof:"kind"` + Kind isValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Value) Reset() { *x = Value{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_value_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Value) String() string { @@ -61,7 +59,7 @@ func (*Value) ProtoMessage() {} func (x *Value) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_value_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -76,93 +74,117 @@ func (*Value) Descriptor() ([]byte, []int) { return file_cel_expr_value_proto_rawDescGZIP(), []int{0} } -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind +func (x *Value) GetKind() isValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *Value) GetNullValue() structpb.NullValue { - if x, ok := x.GetKind().(*Value_NullValue); ok { - return x.NullValue + if x != nil { + if x, ok := x.Kind.(*Value_NullValue); ok { + return x.NullValue + } } return structpb.NullValue(0) } func (x *Value) GetBoolValue() bool { - if x, ok := x.GetKind().(*Value_BoolValue); ok { - return x.BoolValue + if x != nil { + if x, ok := x.Kind.(*Value_BoolValue); ok { + return x.BoolValue + } } return false } func (x *Value) GetInt64Value() int64 { - if x, ok := x.GetKind().(*Value_Int64Value); ok { - return x.Int64Value + if x != nil { + if x, ok := x.Kind.(*Value_Int64Value); ok { + return x.Int64Value + } } return 0 } func (x *Value) GetUint64Value() uint64 { - if x, ok := x.GetKind().(*Value_Uint64Value); ok { - return x.Uint64Value + if x != nil { + if x, ok := x.Kind.(*Value_Uint64Value); ok { + return x.Uint64Value + } } return 0 } func (x *Value) GetDoubleValue() float64 { - if x, ok := x.GetKind().(*Value_DoubleValue); ok { - return x.DoubleValue + if x != nil { + if x, ok := x.Kind.(*Value_DoubleValue); ok { + return x.DoubleValue + } } return 0 } func (x *Value) GetStringValue() string { - if x, ok := x.GetKind().(*Value_StringValue); ok { - return x.StringValue + if x != nil { + if x, ok := x.Kind.(*Value_StringValue); ok { + return x.StringValue + } } return "" } func (x *Value) GetBytesValue() []byte { - if x, ok := x.GetKind().(*Value_BytesValue); ok { - return x.BytesValue + if x != nil { + if x, ok := x.Kind.(*Value_BytesValue); ok { + return x.BytesValue + } } return nil } func (x *Value) GetEnumValue() *EnumValue { - if x, ok := x.GetKind().(*Value_EnumValue); ok { - return x.EnumValue + if x 
!= nil { + if x, ok := x.Kind.(*Value_EnumValue); ok { + return x.EnumValue + } } return nil } func (x *Value) GetObjectValue() *anypb.Any { - if x, ok := x.GetKind().(*Value_ObjectValue); ok { - return x.ObjectValue + if x != nil { + if x, ok := x.Kind.(*Value_ObjectValue); ok { + return x.ObjectValue + } } return nil } func (x *Value) GetMapValue() *MapValue { - if x, ok := x.GetKind().(*Value_MapValue); ok { - return x.MapValue + if x != nil { + if x, ok := x.Kind.(*Value_MapValue); ok { + return x.MapValue + } } return nil } func (x *Value) GetListValue() *ListValue { - if x, ok := x.GetKind().(*Value_ListValue); ok { - return x.ListValue + if x != nil { + if x, ok := x.Kind.(*Value_ListValue); ok { + return x.ListValue + } } return nil } func (x *Value) GetTypeValue() string { - if x, ok := x.GetKind().(*Value_TypeValue); ok { - return x.TypeValue + if x != nil { + if x, ok := x.Kind.(*Value_TypeValue); ok { + return x.TypeValue + } } return "" } @@ -244,21 +266,18 @@ func (*Value_ListValue) isValue_Kind() {} func (*Value_TypeValue) isValue_Kind() {} type EnumValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EnumValue) Reset() { *x = EnumValue{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_value_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValue) String() string { @@ -269,7 +288,7 @@ func (*EnumValue) ProtoMessage() {} func (x *EnumValue) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_value_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -299,20 +318,17 @@ func (x *EnumValue) GetValue() int32 { } type ListValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` unknownFields protoimpl.UnknownFields - - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ListValue) Reset() { *x = ListValue{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_value_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListValue) String() string { @@ -323,7 +339,7 @@ func (*ListValue) ProtoMessage() {} func (x *ListValue) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_value_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -346,20 +362,17 @@ func (x *ListValue) GetValues() []*Value { } type 
MapValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` unknownFields protoimpl.UnknownFields - - Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MapValue) Reset() { *x = MapValue{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_value_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MapValue) String() string { @@ -370,7 +383,7 @@ func (*MapValue) ProtoMessage() {} func (x *MapValue) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_value_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -393,21 +406,18 @@ func (x *MapValue) GetEntries() []*MapValue_Entry { } type MapValue_Entry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MapValue_Entry) Reset() { *x = MapValue_Entry{} - if protoimpl.UnsafeEnabled { - mi := &file_cel_expr_value_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_cel_expr_value_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MapValue_Entry) String() string { @@ -418,7 +428,7 @@ func (*MapValue_Entry) ProtoMessage() {} func (x *MapValue_Entry) ProtoReflect() protoreflect.Message { mi := &file_cel_expr_value_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -449,83 +459,58 @@ func (x *MapValue_Entry) GetValue() *Value { var File_cel_expr_value_proto protoreflect.FileDescriptor -var file_cel_expr_value_proto_rawDesc = []byte{ - 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, - 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 
0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69, - 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, - 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, - 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, - 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, - 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, - 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69, - 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75, - 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, - 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 
0x6c, 0x75, 0x65, 0x52, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, - 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, - 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65, - 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65, - 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} +const file_cel_expr_value_proto_rawDesc = "" + + "\n" + + "\x14cel/expr/value.proto\x12\bcel.expr\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x9d\x04\n" + + "\x05Value\x12;\n" + + "\n" + + "null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12\x1f\n" + + "\n" + + "bool_value\x18\x02 \x01(\bH\x00R\tboolValue\x12!\n" + + "\vint64_value\x18\x03 \x01(\x03H\x00R\n" + + "int64Value\x12#\n" + + "\fuint64_value\x18\x04 \x01(\x04H\x00R\vuint64Value\x12#\n" + + "\fdouble_value\x18\x05 \x01(\x01H\x00R\vdoubleValue\x12#\n" + + "\fstring_value\x18\x06 \x01(\tH\x00R\vstringValue\x12!\n" + + "\vbytes_value\x18\a \x01(\fH\x00R\n" + + "bytesValue\x124\n" + + "\n" + + "enum_value\x18\t \x01(\v2\x13.cel.expr.EnumValueH\x00R\tenumValue\x129\n" + + "\fobject_value\x18\n" + + " \x01(\v2\x14.google.protobuf.AnyH\x00R\vobjectValue\x121\n" + + "\tmap_value\x18\v \x01(\v2\x12.cel.expr.MapValueH\x00R\bmapValue\x124\n" + + "\n" + + "list_value\x18\f \x01(\v2\x13.cel.expr.ListValueH\x00R\tlistValue\x12\x1f\n" + + "\n" + + "type_value\x18\x0f \x01(\tH\x00R\ttypeValueB\x06\n" + + "\x04kind\"5\n" + + "\tEnumValue\x12\x12\n" + + "\x04type\x18\x01 \x01(\tR\x04type\x12\x14\n" + + "\x05value\x18\x02 \x01(\x05R\x05value\"4\n" + + "\tListValue\x12'\n" + + "\x06values\x18\x01 \x03(\v2\x0f.cel.expr.ValueR\x06values\"\x91\x01\n" + + "\bMapValue\x122\n" + + "\aentries\x18\x01 \x03(\v2\x18.cel.expr.MapValue.EntryR\aentries\x1aQ\n" + + "\x05Entry\x12!\n" + + "\x03key\x18\x01 \x01(\v2\x0f.cel.expr.ValueR\x03key\x12%\n" + + "\x05value\x18\x02 \x01(\v2\x0f.cel.expr.ValueR\x05valueB-\n" + + "\fdev.cel.exprB\n" + + "ValueProtoP\x01Z\fcel.dev/expr\xf8\x01\x01b\x06proto3" var ( file_cel_expr_value_proto_rawDescOnce sync.Once - file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc + file_cel_expr_value_proto_rawDescData []byte ) func file_cel_expr_value_proto_rawDescGZIP() []byte { file_cel_expr_value_proto_rawDescOnce.Do(func() { - file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData) + file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_cel_expr_value_proto_rawDesc), 
len(file_cel_expr_value_proto_rawDesc))) }) return file_cel_expr_value_proto_rawDescData } var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_cel_expr_value_proto_goTypes = []interface{}{ +var file_cel_expr_value_proto_goTypes = []any{ (*Value)(nil), // 0: cel.expr.Value (*EnumValue)(nil), // 1: cel.expr.EnumValue (*ListValue)(nil), // 2: cel.expr.ListValue @@ -556,69 +541,7 @@ func file_cel_expr_value_proto_init() { if File_cel_expr_value_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Value); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MapValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MapValue_Entry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []any{ (*Value_NullValue)(nil), (*Value_BoolValue)(nil), (*Value_Int64Value)(nil), @@ -636,7 +559,7 @@ func file_cel_expr_value_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_cel_expr_value_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_cel_expr_value_proto_rawDesc), len(file_cel_expr_value_proto_rawDesc)), NumEnums: 0, NumMessages: 5, NumExtensions: 0, @@ -647,7 +570,6 @@ func file_cel_expr_value_proto_init() { MessageInfos: file_cel_expr_value_proto_msgTypes, }.Build() File_cel_expr_value_proto = out.File - file_cel_expr_value_proto_rawDesc = nil file_cel_expr_value_proto_goTypes = nil file_cel_expr_value_proto_depIdxs = nil } diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 235496eeb..1101d206d 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -1,7 +1,7 @@ TOML stands for Tom's Obvious, Minimal Language. This Go package provides a reflection interface similar to Go's standard library `json` and `xml` packages. -Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). +Compatible with TOML version [v1.1.0](https://toml.io/en/v1.1.0). 
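
Stepping back to the cel.expr file above before the TOML changes continue: the raw descriptor is now a string constant instead of a `[]byte` var, and the regenerated code reinterprets it as a byte slice without copying via `unsafe.Slice(unsafe.StringData(...))`, which lets the descriptor live as immutable string data and removes the old step that nilled the slice after registration. A small sketch of that zero-copy conversion, assuming only the standard library:

```go
package main

import (
	"fmt"
	"unsafe"
)

// bytesOf reinterprets a string's backing array as a []byte without
// copying, the same trick the regenerated descriptor code uses above.
// The result aliases immutable string data and must never be written to.
func bytesOf(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	const rawDesc = "\n\x14cel/expr/value.proto"
	b := bytesOf(rawDesc)
	fmt.Println(len(b), b[0]) // 22 10 (10 is '\n')
}
```
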
Documentation: https://pkg.go.dev/github.com/BurntSushi/toml diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 3fa516caa..ed884840f 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -206,6 +206,13 @@ func markDecodedRecursive(md *MetaData, tmap map[string]any) { markDecodedRecursive(md, tmap) md.context = md.context[0 : len(md.context)-1] } + if tarr, ok := tmap[key].([]map[string]any); ok { + for _, elm := range tarr { + md.context = append(md.context, key) + markDecodedRecursive(md, elm) + md.context = md.context[0 : len(md.context)-1] + } + } } } @@ -423,7 +430,7 @@ func (md *MetaData) unifyString(data any, rv reflect.Value) error { if i, ok := data.(int64); ok { rv.SetString(strconv.FormatInt(i, 10)) } else if f, ok := data.(float64); ok { - rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + rv.SetString(strconv.FormatFloat(f, 'g', -1, 64)) } else { return md.badtype("string", data) } diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index ac196e7df..bd7aa1865 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -228,9 +228,9 @@ func (enc *Encoder) eElement(rv reflect.Value) { } switch v.Location() { default: - enc.wf(v.Format(format)) + enc.write(v.Format(format)) case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: - enc.wf(v.In(time.UTC).Format(format)) + enc.write(v.In(time.UTC).Format(format)) } return case Marshaler: @@ -279,40 +279,40 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.String: enc.writeQuoted(rv.String()) case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) + enc.write(strconv.FormatBool(rv.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) + enc.write(strconv.FormatInt(rv.Int(), 10)) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) + enc.write(strconv.FormatUint(rv.Uint(), 10)) case reflect.Float32: f := rv.Float() if math.IsNaN(f) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("nan") + enc.write("nan") } else if math.IsInf(f, 0) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("inf") + enc.write("inf") } else { - enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + enc.write(floatAddDecimal(strconv.FormatFloat(f, 'g', -1, 32))) } case reflect.Float64: f := rv.Float() if math.IsNaN(f) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("nan") + enc.write("nan") } else if math.IsInf(f, 0) { if math.Signbit(f) { - enc.wf("-") + enc.write("-") } - enc.wf("inf") + enc.write("inf") } else { - enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + enc.write(floatAddDecimal(strconv.FormatFloat(f, 'g', -1, 64))) } case reflect.Array, reflect.Slice: enc.eArrayOrSliceElement(rv) @@ -330,27 +330,32 @@ func (enc *Encoder) eElement(rv reflect.Value) { // By the TOML spec, all floats must have a decimal with at least one number on // either side. func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" + for _, c := range fstr { + if c == 'e' { // Exponent syntax + return fstr + } + if c == '.' 
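
On the decode.go hunk above: `markDecodedRecursive` previously descended only into nested `map[string]any`, so keys inside arrays of tables (`[[x]]`, stored internally as `[]map[string]any`) were never marked as decoded; the added branch walks those elements too. A toy illustration of the traversal shape (plain maps, not the package internals):

```go
package main

import "fmt"

// walk mirrors the fixed traversal: descend into nested tables
// (map[string]any) and, newly, into arrays of tables ([]map[string]any).
func walk(prefix string, m map[string]any) {
	for key, v := range m {
		full := prefix + key
		fmt.Println(full)
		switch t := v.(type) {
		case map[string]any:
			walk(full+".", t)
		case []map[string]any: // the case the hunk above adds
			for _, elem := range t {
				walk(full+".", elem)
			}
		}
	}
}

func main() {
	// Roughly the internal shape produced by "[[servers]]" tables.
	walk("", map[string]any{
		"servers": []map[string]any{{"host": "a"}, {"host": "b"}},
	})
}
```
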
{ + return fstr + } } - return fstr + return fstr + ".0" } func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) + enc.write(`"` + dblQuotedReplacer.Replace(s) + `"`) } func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { length := rv.Len() - enc.wf("[") + enc.write("[") for i := 0; i < length; i++ { elem := eindirect(rv.Index(i)) enc.eElement(elem) if i != length-1 { - enc.wf(", ") + enc.write(", ") } } - enc.wf("]") + enc.write("]") } func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { @@ -363,7 +368,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { continue } enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.writef("%s[[%s]]", enc.indentStr(key), key) enc.newline() enc.eMapOrStruct(key, trv, false) } @@ -376,7 +381,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) { enc.newline() } if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key) + enc.writef("%s[%s]", enc.indentStr(key), key) enc.newline() } enc.eMapOrStruct(key, rv, false) @@ -422,7 +427,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{mapKey.String()}, val, true) if trailC || i != len(mapKeys)-1 { - enc.wf(", ") + enc.write(", ") } } else { enc.encode(key.add(mapKey.String()), val) @@ -431,12 +436,12 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { } if inline { - enc.wf("{") + enc.write("{") } writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) writeMapKeys(mapKeysSub, false) if inline { - enc.wf("}") + enc.write("}") } } @@ -534,7 +539,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{keyName}, fieldVal, true) if fieldIndex[0] != totalFields-1 { - enc.wf(", ") + enc.write(", ") } } else { enc.encode(key.add(keyName), fieldVal) @@ -543,14 +548,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { } if inline { - enc.wf("{") + enc.write("{") } l := len(fieldsDirect) + len(fieldsSub) writeFields(fieldsDirect, l) writeFields(fieldsSub, l) if inline { - enc.wf("}") + enc.write("}") } } @@ -700,7 +705,7 @@ func isEmpty(rv reflect.Value) bool { func (enc *Encoder) newline() { if enc.hasWritten { - enc.wf("\n") + enc.write("\n") } } @@ -722,14 +727,22 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { enc.eElement(val) return } - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.writef("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.eElement(val) if !inline { enc.newline() } } -func (enc *Encoder) wf(format string, v ...any) { +func (enc *Encoder) write(s string) { + _, err := enc.w.WriteString(s) + if err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) writef(format string, v ...any) { _, err := fmt.Fprintf(enc.w, format, v...) 
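
Two notes on the encode.go hunks around here: the `wf` helper is split into `write` (plain strings, no `fmt` overhead) and `writef` (formatted), and `strconv.FormatFloat` switches from the 'f' verb to 'g', so very large or small values serialize in exponent form; the reworked `floatAddDecimal` accordingly leaves exponent-form strings alone and only appends ".0" to bare integers. A quick self-contained check of that float behavior:

```go
package main

import (
	"fmt"
	"strconv"
)

// floatAddDecimal reproduces the reworked helper above: strings in
// exponent form or already containing '.' pass through unchanged;
// bare integers get ".0" appended to stay valid TOML floats.
func floatAddDecimal(fstr string) string {
	for _, c := range fstr {
		if c == 'e' || c == '.' {
			return fstr
		}
	}
	return fstr + ".0"
}

func main() {
	const big = 1e21
	fmt.Println(strconv.FormatFloat(big, 'f', -1, 64)) // 1000000000000000000000
	fmt.Println(strconv.FormatFloat(big, 'g', -1, 64)) // 1e+21

	for _, s := range []string{"3", "3.14", "1e+21"} {
		fmt.Println(floatAddDecimal(s)) // 3.0, 3.14, 1e+21
	}
}
```
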
if err != nil { encPanic(err) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 1c3b47702..9f4396a0f 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -13,7 +13,6 @@ type itemType int const ( itemError itemType = iota - itemNIL // used in the parser to indicate no type itemEOF itemText itemString @@ -47,14 +46,13 @@ func (p Position) String() string { } type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item - tomlNext bool - esc bool + input string + start int + pos int + line int + state stateFn + items chan item + esc bool // Allow for backing up up to 4 runes. This is necessary because TOML // contains 3-rune tokens (""" and '''). @@ -90,14 +88,13 @@ func (lx *lexer) nextItem() item { } } -func lex(input string, tomlNext bool) *lexer { +func lex(input string) *lexer { lx := &lexer{ - input: input, - state: lexTop, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - line: 1, - tomlNext: tomlNext, + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, } return lx } @@ -108,7 +105,7 @@ func (lx *lexer) push(state stateFn) { func (lx *lexer) pop() stateFn { if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop") + panic("BUG in lexer: no states to pop") } last := lx.stack[len(lx.stack)-1] lx.stack = lx.stack[0 : len(lx.stack)-1] @@ -305,6 +302,8 @@ func lexTop(lx *lexer) stateFn { return lexTableStart case eof: if lx.pos > lx.start { + // TODO: never reached? I think this can only occur on a bug in the + // lexer(?) return lx.errorf("unexpected EOF") } lx.emit(itemEOF) @@ -392,8 +391,6 @@ func lexTableNameStart(lx *lexer) stateFn { func lexTableNameEnd(lx *lexer) stateFn { lx.skip(isWhitespace) switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd case r == '.': lx.ignore() return lexTableNameStart @@ -412,7 +409,7 @@ func lexTableNameEnd(lx *lexer) stateFn { // Lexes only one part, e.g. only 'a' inside 'a.b'. func lexBareName(lx *lexer) stateFn { r := lx.next() - if isBareKeyChar(r, lx.tomlNext) { + if isBareKeyChar(r) { return lexBareName } lx.backup() @@ -420,23 +417,23 @@ func lexBareName(lx *lexer) stateFn { return lx.pop() } -// lexBareName lexes one part of a key or table. -// -// It assumes that at least one valid character for the table has already been -// read. +// lexQuotedName lexes one part of a quoted key or table name. It assumes that +// it starts lexing at the quote itself (" or '). // // Lexes only one part, e.g. only '"a"' inside '"a".b'. func lexQuotedName(lx *lexer) stateFn { r := lx.next() switch { - case isWhitespace(r): - return lexSkip(lx, lexValue) case r == '"': lx.ignore() // ignore the '"' return lexString case r == '\'': lx.ignore() // ignore the "'" return lexRawString + + // TODO: I don't think any of the below conditions can ever be reached? 
+ case isWhitespace(r): + return lexSkip(lx, lexValue) case r == eof: return lx.errorf("unexpected EOF; expected value") default: @@ -464,17 +461,19 @@ func lexKeyStart(lx *lexer) stateFn { func lexKeyNameStart(lx *lexer) stateFn { lx.skip(isWhitespace) switch r := lx.peek(); { - case r == '=' || r == eof: - return lx.errorf("unexpected '='") - case r == '.': - return lx.errorf("unexpected '.'") + default: + lx.push(lexKeyEnd) + return lexBareName case r == '"' || r == '\'': lx.ignore() lx.push(lexKeyEnd) return lexQuotedName - default: - lx.push(lexKeyEnd) - return lexBareName + + // TODO: I think these can never be reached? + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") } } @@ -485,7 +484,7 @@ func lexKeyEnd(lx *lexer) stateFn { switch r := lx.next(); { case isWhitespace(r): return lexSkip(lx, lexKeyEnd) - case r == eof: + case r == eof: // TODO: never reached return lx.errorf("unexpected EOF; expected key separator '='") case r == '.': lx.ignore() @@ -628,10 +627,7 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): - if lx.tomlNext { - return lexSkip(lx, lexInlineTableValue) - } - return lx.errorPrevLine(errLexInlineTableNL{}) + return lexSkip(lx, lexInlineTableValue) case r == '#': lx.push(lexInlineTableValue) return lexCommentStart @@ -653,10 +649,7 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): - if lx.tomlNext { - return lexSkip(lx, lexInlineTableValueEnd) - } - return lx.errorPrevLine(errLexInlineTableNL{}) + return lexSkip(lx, lexInlineTableValueEnd) case r == '#': lx.push(lexInlineTableValueEnd) return lexCommentStart @@ -664,10 +657,7 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { lx.ignore() lx.skip(isWhitespace) if lx.peek() == '}' { - if lx.tomlNext { - return lexInlineTableValueEnd - } - return lx.errorf("trailing comma not allowed in inline tables") + return lexInlineTableValueEnd } return lexInlineTableValue case r == '}': @@ -855,9 +845,6 @@ func lexStringEscape(lx *lexer) stateFn { r := lx.next() switch r { case 'e': - if !lx.tomlNext { - return lx.error(errLexEscape{r}) - } fallthrough case 'b': fallthrough @@ -878,9 +865,6 @@ func lexStringEscape(lx *lexer) stateFn { case '\\': return lx.pop() case 'x': - if !lx.tomlNext { - return lx.error(errLexEscape{r}) - } return lexHexEscape case 'u': return lexShortUnicodeEscape @@ -928,19 +912,9 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { // lexBaseNumberOrDate can differentiate base prefixed integers from other // types. func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - switch r { - case '0': + if lx.next() == '0' { return lexBaseNumberOrDate } - - if !isDigit(r) { - // The only way to reach this state is if the value starts - // with a digit, so specifically treat anything else as an - // error. 
- return lx.errorf("expected a digit but got %q", r) - } - return lexNumberOrDate } @@ -1196,13 +1170,13 @@ func lexSkip(lx *lexer, nextState stateFn) stateFn { } func (s stateFn) String() string { + if s == nil { + return "" + } name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() if i := strings.LastIndexByte(name, '.'); i > -1 { name = name[i+1:] } - if s == nil { - name = "" - } return name + "()" } @@ -1210,8 +1184,6 @@ func (itype itemType) String() string { switch itype { case itemError: return "Error" - case itemNIL: - return "NIL" case itemEOF: return "EOF" case itemText: @@ -1226,18 +1198,22 @@ func (itype itemType) String() string { return "Float" case itemDatetime: return "DateTime" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" case itemTableStart: return "TableStart" case itemTableEnd: return "TableEnd" + case itemArrayTableStart: + return "ArrayTableStart" + case itemArrayTableEnd: + return "ArrayTableEnd" case itemKeyStart: return "KeyStart" case itemKeyEnd: return "KeyEnd" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" case itemCommentStart: return "CommentStart" case itemInlineTableStart: @@ -1266,7 +1242,7 @@ func isDigit(r rune) bool { return r >= '0' && r <= '9' } func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } -func isBareKeyChar(r rune, tomlNext bool) bool { +func isBareKeyChar(r rune) bool { return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' } diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index e3ea8a9a2..b474247ae 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -3,7 +3,6 @@ package toml import ( "fmt" "math" - "os" "strconv" "strings" "time" @@ -17,7 +16,6 @@ type parser struct { context Key // Full key for the current hash in scope. currentKey string // Base key name for everything except hashes. pos Position // Current position in the TOML file. - tomlNext bool ordered []Key // List of keys in the order that they appear in the TOML data. 
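
Taken together, the lex.go hunks above and the parse.go hunks here remove the `tomlNext` flag and the `BURNTSUSHI_TOML_110` environment gate, so TOML 1.1 behavior is now the default: newlines and trailing commas inside inline tables, the `\e` (ESC, 0x1b) and `\xNN` string escapes, and the shorter datetime forms. A sketch exercising the first two through the package's public API, assuming the vendored version in this diff:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	// Both constructs below used to require BURNTSUSHI_TOML_110=1:
	// a multi-line inline table with a trailing comma, and the \xNN
	// string escape (here \xe9, i.e. U+00E9).
	src := "point = {\n  x = 1,\n  y = 2,\n}\nname = \"caf\\xe9\"\n"

	var v struct {
		Point map[string]int64 `toml:"point"`
		Name  string           `toml:"name"`
	}
	if _, err := toml.Decode(src, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Point["x"], v.Point["y"], v.Name) // 1 2 café
}
```
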
@@ -32,8 +30,6 @@ type keyInfo struct { } func parse(data string) (p *parser, err error) { - _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") - defer func() { if r := recover(); r != nil { if pErr, ok := r.(ParseError); ok { @@ -73,10 +69,9 @@ func parse(data string) (p *parser, err error) { p = &parser{ keyInfo: make(map[string]keyInfo), mapping: make(map[string]any), - lx: lex(data, tomlNext), + lx: lex(data), ordered: make([]Key, 0), implicits: make(map[string]struct{}), - tomlNext: tomlNext, } for { item := p.next() @@ -350,17 +345,14 @@ func (p *parser) valueFloat(it item) (any, tomlType) { var dtTypes = []struct { fmt string zone *time.Location - next bool }{ - {time.RFC3339Nano, time.Local, false}, - {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, - {"2006-01-02", internal.LocalDate, false}, - {"15:04:05.999999999", internal.LocalTime, false}, - - // tomlNext - {"2006-01-02T15:04Z07:00", time.Local, true}, - {"2006-01-02T15:04", internal.LocalDatetime, true}, - {"15:04", internal.LocalTime, true}, + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, + {"2006-01-02T15:04Z07:00", time.Local}, + {"2006-01-02T15:04", internal.LocalDatetime}, + {"15:04", internal.LocalTime}, } func (p *parser) valueDatetime(it item) (any, tomlType) { @@ -371,9 +363,6 @@ func (p *parser) valueDatetime(it item) (any, tomlType) { err error ) for _, dt := range dtTypes { - if dt.next && !p.tomlNext { - continue - } t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) if err == nil { if missingLeadingZero(it.val, dt.fmt) { @@ -644,6 +633,11 @@ func (p *parser) setValue(key string, value any) { // Note that since it has already been defined (as a hash), we don't // want to overwrite it. So our business is done. if p.isArray(keyContext) { + if !p.isImplicit(keyContext) { + if _, ok := hash[key]; ok { + p.panicf("Key '%s' has already been defined.", keyContext) + } + } p.removeImplicit(keyContext) hash[key] = value return @@ -802,10 +796,8 @@ func (p *parser) replaceEscapes(it item, str string) string { b.WriteByte(0x0d) skip = 1 case 'e': - if p.tomlNext { - b.WriteByte(0x1b) - skip = 1 - } + b.WriteByte(0x1b) + skip = 1 case '"': b.WriteByte(0x22) skip = 1 @@ -815,11 +807,9 @@ func (p *parser) replaceEscapes(it item, str string) string { // The lexer guarantees the correct number of characters are present; // don't need to check here. case 'x': - if p.tomlNext { - escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) - b.WriteRune(escaped) - skip = 3 - } + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) + b.WriteRune(escaped) + skip = 3 case 'u': escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6]) b.WriteRune(escaped) diff --git a/vendor/github.com/cenkalti/backoff/v5/exponential.go b/vendor/github.com/cenkalti/backoff/v5/exponential.go index c1f3e442d..79d425e87 100644 --- a/vendor/github.com/cenkalti/backoff/v5/exponential.go +++ b/vendor/github.com/cenkalti/backoff/v5/exponential.go @@ -1,7 +1,7 @@ package backoff import ( - "math/rand" + "math/rand/v2" "time" ) @@ -28,13 +28,7 @@ multiplied by the exponential, that is, between 2 and 6 seconds. Note: MaxInterval caps the RetryInterval and not the randomized interval. -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). 
- -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: +Example: Given the following default arguments, for 9 tries the sequence will be: Request # RetryInterval (seconds) Randomized Interval (seconds) @@ -47,7 +41,6 @@ and assuming we go over the MaxElapsedTime on the 10th try: 7 5.692 [2.846, 8.538] 8 8.538 [4.269, 12.807] 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop Note: Implementation is not thread-safe. */ diff --git a/vendor/github.com/cenkalti/backoff/v5/retry.go b/vendor/github.com/cenkalti/backoff/v5/retry.go index e43f47fb8..32a7f9883 100644 --- a/vendor/github.com/cenkalti/backoff/v5/retry.go +++ b/vendor/github.com/cenkalti/backoff/v5/retry.go @@ -47,7 +47,7 @@ func WithNotify(n Notify) RetryOption { } } -// WithMaxTries limits the number of retry attempts. +// WithMaxTries limits the number of all attempts. func WithMaxTries(n uint) RetryOption { return func(args *retryOptions) { args.MaxTries = n @@ -97,7 +97,7 @@ func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOpti // Handle permanent errors without retrying. var permanent *PermanentError if errors.As(err, &permanent) { - return res, err + return res, permanent.Unwrap() } // Stop retrying if context is cancelled. diff --git a/vendor/github.com/cert-manager/cert-manager/LICENSES b/vendor/github.com/cert-manager/cert-manager/LICENSES index c552b7923..dfd67cbfa 100644 --- a/vendor/github.com/cert-manager/cert-manager/LICENSES +++ b/vendor/github.com/cert-manager/cert-manager/LICENSES @@ -1,200 +1,235 @@ -cel.dev/expr,https://github.com/google/cel-spec/blob/v0.19.1/LICENSE,Apache-2.0 -cloud.google.com/go/auth,https://github.com/googleapis/google-cloud-go/blob/auth/v0.9.4/auth/LICENSE,Apache-2.0 -cloud.google.com/go/auth/oauth2adapt,https://github.com/googleapis/google-cloud-go/blob/auth/oauth2adapt/v0.2.4/auth/oauth2adapt/LICENSE,Apache-2.0 -cloud.google.com/go/compute/metadata,https://github.com/googleapis/google-cloud-go/blob/compute/metadata/v0.5.2/compute/metadata/LICENSE,Apache-2.0 -github.com/Azure/azure-sdk-for-go/sdk/azcore,https://github.com/Azure/azure-sdk-for-go/blob/sdk/azcore/v1.14.0/sdk/azcore/LICENSE.txt,MIT -github.com/Azure/azure-sdk-for-go/sdk/azidentity,https://github.com/Azure/azure-sdk-for-go/blob/sdk/azidentity/v1.7.0/sdk/azidentity/LICENSE.txt,MIT -github.com/Azure/azure-sdk-for-go/sdk/internal,https://github.com/Azure/azure-sdk-for-go/blob/sdk/internal/v1.10.0/sdk/internal/LICENSE.txt,MIT -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns,https://github.com/Azure/azure-sdk-for-go/blob/sdk/resourcemanager/dns/armdns/v1.2.0/sdk/resourcemanager/dns/armdns/LICENSE.txt,MIT -github.com/Azure/go-ntlmssp,https://github.com/Azure/go-ntlmssp/blob/754e69321358/LICENSE,MIT -github.com/AzureAD/microsoft-authentication-library-for-go/apps,https://github.com/AzureAD/microsoft-authentication-library-for-go/blob/v1.2.2/LICENSE,MIT -github.com/Khan/genqlient/graphql,https://github.com/Khan/genqlient/blob/v0.7.0/LICENSE,MIT -github.com/NYTimes/gziphandler,https://github.com/NYTimes/gziphandler/blob/v1.1.1/LICENSE,Apache-2.0 -github.com/Venafi/vcert/v5,https://github.com/Venafi/vcert/blob/v5.8.0/LICENSE,Apache-2.0 -github.com/akamai/AkamaiOPEN-edgegrid-golang,https://github.com/akamai/AkamaiOPEN-edgegrid-golang/blob/v1.2.2/LICENSE,Apache-2.0 -github.com/antlr4-go/antlr/v4,https://github.com/antlr4-go/antlr/blob/v4.13.1/LICENSE,BSD-3-Clause 
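
Before the long LICENSES rewrite below, a note on the backoff/v5 hunks above: `Retry` now unwraps a `*PermanentError` before returning it, `WithMaxTries` is documented as counting every attempt (the first included, not just retries), and the package moved to `math/rand/v2`. A usage sketch based only on the signatures visible in this diff, where `Operation[T]` is assumed to be `func() (T, error)`:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v5"
)

func main() {
	attempts := 0

	// WithMaxTries(3) allows three attempts in total, the first try
	// included, matching the corrected doc comment above.
	op := func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errors.New("transient failure")
		}
		return "ok", nil
	}

	res, err := backoff.Retry(context.Background(), op, backoff.WithMaxTries(3))
	fmt.Println(res, err, attempts) // ok <nil> 3
}
```
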
-github.com/asaskevich/govalidator,https://github.com/asaskevich/govalidator/blob/a9d515a09cc2/LICENSE,MIT -github.com/aws/aws-sdk-go-v2,https://github.com/aws/aws-sdk-go-v2/blob/v1.31.0/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/config,https://github.com/aws/aws-sdk-go-v2/blob/config/v1.27.36/config/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/credentials,https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.34/credentials/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/feature/ec2/imds,https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.14/feature/ec2/imds/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/internal/configsources,https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.18/internal/configsources/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2,https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.18/internal/endpoints/v2/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/internal/ini,https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.1/internal/ini/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/internal/sync/singleflight,https://github.com/aws/aws-sdk-go-v2/blob/v1.31.0/internal/sync/singleflight/LICENSE,BSD-3-Clause -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding,https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.11.5/service/internal/accept-encoding/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url,https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.11.20/service/internal/presigned-url/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/service/route53,https://github.com/aws/aws-sdk-go-v2/blob/service/route53/v1.44.0/service/route53/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/service/sso,https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.23.0/service/sso/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/service/ssooidc,https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.27.0/service/ssooidc/LICENSE.txt,Apache-2.0 -github.com/aws/aws-sdk-go-v2/service/sts,https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.31.0/service/sts/LICENSE.txt,Apache-2.0 -github.com/aws/smithy-go,https://github.com/aws/smithy-go/blob/v1.21.0/LICENSE,Apache-2.0 -github.com/aws/smithy-go/internal/sync/singleflight,https://github.com/aws/smithy-go/blob/v1.21.0/internal/sync/singleflight/LICENSE,BSD-3-Clause -github.com/beorn7/perks/quantile,https://github.com/beorn7/perks/blob/v1.0.1/LICENSE,MIT -github.com/blang/semver/v4,https://github.com/blang/semver/blob/v4.0.0/v4/LICENSE,MIT -github.com/cenkalti/backoff/v4,https://github.com/cenkalti/backoff/blob/v4.3.0/LICENSE,MIT -github.com/cert-manager/cert-manager,https://github.com/cert-manager/cert-manager/blob/HEAD/LICENSE,Apache-2.0 -github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/azuredns,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/azuredns/LICENSE,MIT -github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/clouddns,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/clouddns/LICENSE,MIT -github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/cloudflare,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/cloudflare/LICENSE,MIT -github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/route53,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/route53/LICENSE,MIT 
-github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/util,https://github.com/cert-manager/cert-manager/blob/HEAD/pkg/issuer/acme/dns/util/LICENSE,MIT -github.com/cert-manager/cert-manager/third_party/forked/acme,https://github.com/cert-manager/cert-manager/blob/HEAD/third_party/forked/acme/LICENSE,BSD-3-Clause -github.com/cespare/xxhash/v2,https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt,MIT -github.com/coreos/go-semver/semver,https://github.com/coreos/go-semver/blob/v0.3.1/LICENSE,Apache-2.0 -github.com/coreos/go-systemd/v22,https://github.com/coreos/go-systemd/blob/v22.5.0/LICENSE,Apache-2.0 -github.com/cpu/goacmedns,https://github.com/cpu/goacmedns/blob/v0.1.1/LICENSE,MIT -github.com/davecgh/go-spew/spew,https://github.com/davecgh/go-spew/blob/d8f796af33cc/LICENSE,ISC -github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.125.0/LICENSE.txt,MIT -github.com/digitalocean/godo,https://github.com/digitalocean/godo/blob/v1.125.0/LICENSE.txt,BSD-3-Clause -github.com/emicklei/go-restful/v3,https://github.com/emicklei/go-restful/blob/v3.12.1/LICENSE,MIT -github.com/evanphx/json-patch/v5,https://github.com/evanphx/json-patch/blob/v5.9.0/v5/LICENSE,BSD-3-Clause -github.com/felixge/httpsnoop,https://github.com/felixge/httpsnoop/blob/v1.0.4/LICENSE.txt,MIT -github.com/fsnotify/fsnotify,https://github.com/fsnotify/fsnotify/blob/v1.8.0/LICENSE,BSD-3-Clause -github.com/fxamacker/cbor/v2,https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE,MIT -github.com/go-asn1-ber/asn1-ber,https://github.com/go-asn1-ber/asn1-ber/blob/v1.5.6/LICENSE,MIT -github.com/go-http-utils/headers,https://github.com/go-http-utils/headers/blob/fed159eddc2a/LICENSE,MIT -github.com/go-jose/go-jose/v4,https://github.com/go-jose/go-jose/blob/v4.0.5/LICENSE,Apache-2.0 -github.com/go-jose/go-jose/v4/json,https://github.com/go-jose/go-jose/blob/v4.0.5/json/LICENSE,BSD-3-Clause -github.com/go-ldap/ldap/v3,https://github.com/go-ldap/ldap/blob/v3.4.8/v3/LICENSE,MIT -github.com/go-logr/logr,https://github.com/go-logr/logr/blob/v1.4.2/LICENSE,Apache-2.0 -github.com/go-logr/stdr,https://github.com/go-logr/stdr/blob/v1.2.2/LICENSE,Apache-2.0 -github.com/go-logr/zapr,https://github.com/go-logr/zapr/blob/v1.3.0/LICENSE,Apache-2.0 -github.com/go-openapi/jsonpointer,https://github.com/go-openapi/jsonpointer/blob/v0.21.0/LICENSE,Apache-2.0 -github.com/go-openapi/jsonreference,https://github.com/go-openapi/jsonreference/blob/v0.21.0/LICENSE,Apache-2.0 -github.com/go-openapi/swag,https://github.com/go-openapi/swag/blob/v0.23.0/LICENSE,Apache-2.0 -github.com/gogo/protobuf,https://github.com/gogo/protobuf/blob/v1.3.2/LICENSE,BSD-3-Clause -github.com/golang-jwt/jwt/v5,https://github.com/golang-jwt/jwt/blob/v5.2.2/LICENSE,MIT -github.com/golang/groupcache/lru,https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE,Apache-2.0 -github.com/golang/protobuf/proto,https://github.com/golang/protobuf/blob/v1.5.4/LICENSE,BSD-3-Clause -github.com/golang/snappy,https://github.com/golang/snappy/blob/v0.0.4/LICENSE,BSD-3-Clause -github.com/google/btree,https://github.com/google/btree/blob/v1.1.3/LICENSE,Apache-2.0 -github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.22.1/LICENSE,Apache-2.0 -github.com/google/cel-go,https://github.com/google/cel-go/blob/v0.22.1/LICENSE,BSD-3-Clause -github.com/google/gnostic-models,https://github.com/google/gnostic-models/blob/v0.6.9/LICENSE,Apache-2.0 -github.com/google/go-cmp/cmp,https://github.com/google/go-cmp/blob/v0.6.0/LICENSE,BSD-3-Clause 
-github.com/google/go-querystring/query,https://github.com/google/go-querystring/blob/v1.1.0/LICENSE,BSD-3-Clause -github.com/google/gofuzz,https://github.com/google/gofuzz/blob/v1.2.0/LICENSE,Apache-2.0 -github.com/google/s2a-go,https://github.com/google/s2a-go/blob/v0.1.8/LICENSE.md,Apache-2.0 -github.com/google/uuid,https://github.com/google/uuid/blob/v1.6.0/LICENSE,BSD-3-Clause -github.com/googleapis/enterprise-certificate-proxy/client,https://github.com/googleapis/enterprise-certificate-proxy/blob/v0.3.4/LICENSE,Apache-2.0 -github.com/googleapis/gax-go/v2,https://github.com/googleapis/gax-go/blob/v2.13.0/v2/LICENSE,BSD-3-Clause -github.com/gorilla/websocket,https://github.com/gorilla/websocket/blob/v1.5.3/LICENSE,BSD-2-Clause -github.com/grpc-ecosystem/go-grpc-prometheus,https://github.com/grpc-ecosystem/go-grpc-prometheus/blob/v1.2.0/LICENSE,Apache-2.0 -github.com/grpc-ecosystem/grpc-gateway/v2,https://github.com/grpc-ecosystem/grpc-gateway/blob/v2.25.1/LICENSE,BSD-3-Clause -github.com/hashicorp/errwrap,https://github.com/hashicorp/errwrap/blob/v1.1.0/LICENSE,MPL-2.0 -github.com/hashicorp/go-cleanhttp,https://github.com/hashicorp/go-cleanhttp/blob/v0.5.2/LICENSE,MPL-2.0 -github.com/hashicorp/go-multierror,https://github.com/hashicorp/go-multierror/blob/v1.1.1/LICENSE,MPL-2.0 -github.com/hashicorp/go-retryablehttp,https://github.com/hashicorp/go-retryablehttp/blob/v0.7.7/LICENSE,MPL-2.0 -github.com/hashicorp/go-rootcerts,https://github.com/hashicorp/go-rootcerts/blob/v1.0.2/LICENSE,MPL-2.0 -github.com/hashicorp/go-secure-stdlib/parseutil,https://github.com/hashicorp/go-secure-stdlib/blob/parseutil/v0.1.8/parseutil/LICENSE,MPL-2.0 -github.com/hashicorp/go-secure-stdlib/strutil,https://github.com/hashicorp/go-secure-stdlib/blob/strutil/v0.1.2/strutil/LICENSE,MPL-2.0 -github.com/hashicorp/go-sockaddr,https://github.com/hashicorp/go-sockaddr/blob/v1.0.6/LICENSE,MPL-2.0 -github.com/hashicorp/hcl,https://github.com/hashicorp/hcl/blob/v1.0.1-vault-5/LICENSE,MPL-2.0 -github.com/hashicorp/vault/api,https://github.com/hashicorp/vault/blob/api/v1.15.0/api/LICENSE,MPL-2.0 -github.com/hashicorp/vault/sdk/helper,https://github.com/hashicorp/vault/blob/sdk/v0.14.0/sdk/LICENSE,MPL-2.0 -github.com/jmespath/go-jmespath,https://github.com/jmespath/go-jmespath/blob/b0104c826a24/LICENSE,Apache-2.0 -github.com/josharian/intern,https://github.com/josharian/intern/blob/v1.0.0/license.md,MIT -github.com/json-iterator/go,https://github.com/json-iterator/go/blob/v1.1.12/LICENSE,MIT -github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.17.11/LICENSE,MIT -github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.17.11/LICENSE,Apache-2.0 -github.com/klauspost/compress,https://github.com/klauspost/compress/blob/v1.17.11/LICENSE,BSD-3-Clause -github.com/klauspost/compress/internal/snapref,https://github.com/klauspost/compress/blob/v1.17.11/internal/snapref/LICENSE,BSD-3-Clause -github.com/klauspost/compress/zstd/internal/xxhash,https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt,MIT -github.com/kr/pretty,https://github.com/kr/pretty/blob/v0.3.1/License,MIT -github.com/kr/text,https://github.com/kr/text/blob/v0.2.0/License,MIT -github.com/kylelemons/godebug,https://github.com/kylelemons/godebug/blob/v1.1.0/LICENSE,Apache-2.0 -github.com/mailru/easyjson,https://github.com/mailru/easyjson/blob/v0.9.0/LICENSE,MIT -github.com/miekg/dns,https://github.com/miekg/dns/blob/v1.1.62/LICENSE,BSD-3-Clause 
-github.com/mitchellh/go-homedir,https://github.com/mitchellh/go-homedir/blob/v1.1.0/LICENSE,MIT -github.com/mitchellh/mapstructure,https://github.com/mitchellh/mapstructure/blob/v1.5.0/LICENSE,MIT -github.com/modern-go/concurrent,https://github.com/modern-go/concurrent/blob/bacd9c7ef1dd/LICENSE,Apache-2.0 -github.com/modern-go/reflect2,https://github.com/modern-go/reflect2/blob/v1.0.2/LICENSE,Apache-2.0 -github.com/munnerz/goautoneg,https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE,BSD-3-Clause -github.com/patrickmn/go-cache,https://github.com/patrickmn/go-cache/blob/v2.1.0/LICENSE,MIT -github.com/pavlo-v-chernykh/keystore-go/v4,https://github.com/pavlo-v-chernykh/keystore-go/blob/v4.5.0/LICENSE,MIT -github.com/pierrec/lz4,https://github.com/pierrec/lz4/blob/v2.6.1/LICENSE,BSD-3-Clause -github.com/pkg/browser,https://github.com/pkg/browser/blob/5ac0b6a4141c/LICENSE,BSD-2-Clause -github.com/pkg/errors,https://github.com/pkg/errors/blob/v0.9.1/LICENSE,BSD-2-Clause -github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil,https://github.com/prometheus/client_golang/blob/v1.20.5/internal/github.com/golang/gddo/LICENSE,BSD-3-Clause -github.com/prometheus/client_golang/prometheus,https://github.com/prometheus/client_golang/blob/v1.20.5/LICENSE,Apache-2.0 -github.com/prometheus/client_model/go,https://github.com/prometheus/client_model/blob/v0.6.1/LICENSE,Apache-2.0 -github.com/prometheus/common,https://github.com/prometheus/common/blob/v0.61.0/LICENSE,Apache-2.0 -github.com/prometheus/procfs,https://github.com/prometheus/procfs/blob/v0.15.1/LICENSE,Apache-2.0 -github.com/rogpeppe/go-internal/fmtsort,https://github.com/rogpeppe/go-internal/blob/v1.13.1/LICENSE,BSD-3-Clause -github.com/ryanuber/go-glob,https://github.com/ryanuber/go-glob/blob/v1.0.0/LICENSE,MIT -github.com/sirupsen/logrus,https://github.com/sirupsen/logrus/blob/v1.9.3/LICENSE,MIT -github.com/sosodev/duration,https://github.com/sosodev/duration/blob/v1.3.1/LICENSE,MIT -github.com/spf13/cobra,https://github.com/spf13/cobra/blob/v1.8.1/LICENSE.txt,Apache-2.0 -github.com/spf13/pflag,https://github.com/spf13/pflag/blob/v1.0.5/LICENSE,BSD-3-Clause -github.com/stoewer/go-strcase,https://github.com/stoewer/go-strcase/blob/v1.3.0/LICENSE,MIT -github.com/vektah/gqlparser/v2,https://github.com/vektah/gqlparser/blob/v2.5.15/LICENSE,MIT -github.com/x448/float16,https://github.com/x448/float16/blob/v0.8.4/LICENSE,MIT -github.com/youmark/pkcs8,https://github.com/youmark/pkcs8/blob/3c2c7870ae76/LICENSE,MIT -go.etcd.io/etcd/api/v3,https://github.com/etcd-io/etcd/blob/api/v3.5.17/api/LICENSE,Apache-2.0 -go.etcd.io/etcd/client/pkg/v3,https://github.com/etcd-io/etcd/blob/client/pkg/v3.5.17/client/pkg/LICENSE,Apache-2.0 -go.etcd.io/etcd/client/v3,https://github.com/etcd-io/etcd/blob/client/v3.5.17/client/v3/LICENSE,Apache-2.0 -go.opencensus.io,https://github.com/census-instrumentation/opencensus-go/blob/v0.24.0/LICENSE,Apache-2.0 -go.opentelemetry.io/auto/sdk,https://github.com/open-telemetry/opentelemetry-go-instrumentation/blob/sdk/v1.1.0/sdk/LICENSE,Apache-2.0 -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/google.golang.org/grpc/otelgrpc/v0.58.0/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE,Apache-2.0 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,https://github.com/open-telemetry/opentelemetry-go-contrib/blob/instrumentation/net/http/otelhttp/v0.58.0/instrumentation/net/http/otelhttp/LICENSE,Apache-2.0 -go.opentelemetry.io/otel,https://github.com/open-telemetry/opentelemetry-go/blob/v1.33.0/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/exporters/otlp/otlptrace,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/v1.33.0/exporters/otlp/otlptrace/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc,https://github.com/open-telemetry/opentelemetry-go/blob/exporters/otlp/otlptrace/otlptracegrpc/v1.33.0/exporters/otlp/otlptrace/otlptracegrpc/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/metric,https://github.com/open-telemetry/opentelemetry-go/blob/metric/v1.33.0/metric/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/sdk,https://github.com/open-telemetry/opentelemetry-go/blob/sdk/v1.33.0/sdk/LICENSE,Apache-2.0 -go.opentelemetry.io/otel/trace,https://github.com/open-telemetry/opentelemetry-go/blob/trace/v1.33.0/trace/LICENSE,Apache-2.0 -go.opentelemetry.io/proto/otlp,https://github.com/open-telemetry/opentelemetry-proto-go/blob/otlp/v1.4.0/otlp/LICENSE,Apache-2.0 -go.uber.org/multierr,https://github.com/uber-go/multierr/blob/v1.11.0/LICENSE.txt,MIT -go.uber.org/zap,https://github.com/uber-go/zap/blob/v1.27.0/LICENSE,MIT -golang.org/x/crypto,https://cs.opensource.google/go/x/crypto/+/v0.38.0:LICENSE,BSD-3-Clause -golang.org/x/exp,https://cs.opensource.google/go/x/exp/+/b2144cdd:LICENSE,BSD-3-Clause -golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.38.0:LICENSE,BSD-3-Clause -golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/v0.28.0:LICENSE,BSD-3-Clause -golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.14.0:LICENSE,BSD-3-Clause -golang.org/x/sys,https://cs.opensource.google/go/x/sys/+/v0.33.0:LICENSE,BSD-3-Clause -golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.32.0:LICENSE,BSD-3-Clause -golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.25.0:LICENSE,BSD-3-Clause -golang.org/x/time/rate,https://cs.opensource.google/go/x/time/+/v0.8.0:LICENSE,BSD-3-Clause -gomodules.xyz/jsonpatch/v2,https://github.com/gomodules/jsonpatch/blob/v2.4.0/v2/LICENSE,Apache-2.0 -google.golang.org/api,https://github.com/googleapis/google-api-go-client/blob/v0.198.0/LICENSE,BSD-3-Clause -google.golang.org/api/internal/third_party/uritemplates,https://github.com/googleapis/google-api-go-client/blob/v0.198.0/internal/third_party/uritemplates/LICENSE,BSD-3-Clause -google.golang.org/genproto/googleapis/api,https://github.com/googleapis/go-genproto/blob/6b3ec007d9bb/googleapis/api/LICENSE,Apache-2.0 -google.golang.org/genproto/googleapis/rpc,https://github.com/googleapis/go-genproto/blob/6b3ec007d9bb/googleapis/rpc/LICENSE,Apache-2.0 -google.golang.org/grpc,https://github.com/grpc/grpc-go/blob/v1.69.2/LICENSE,Apache-2.0 -google.golang.org/protobuf,https://github.com/protocolbuffers/protobuf-go/blob/v1.36.0/LICENSE,BSD-3-Clause -gopkg.in/evanphx/json-patch.v4,https://github.com/evanphx/json-patch/blob/v4.12.0/LICENSE,BSD-3-Clause -gopkg.in/inf.v0,https://github.com/go-inf/inf/blob/v0.9.1/LICENSE,BSD-3-Clause -gopkg.in/ini.v1,https://github.com/go-ini/ini/blob/v1.67.0/LICENSE,Apache-2.0 -gopkg.in/natefinch/lumberjack.v2,https://github.com/natefinch/lumberjack/blob/v2.2.1/LICENSE,MIT -gopkg.in/yaml.v2,https://github.com/go-yaml/yaml/blob/v2.4.0/LICENSE,Apache-2.0 
-gopkg.in/yaml.v3,https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE,MIT -k8s.io/api,https://github.com/kubernetes/api/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/apiextensions-apiserver/pkg,https://github.com/kubernetes/apiextensions-apiserver/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/apimachinery/pkg,https://github.com/kubernetes/apimachinery/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/apimachinery/third_party/forked/golang,https://github.com/kubernetes/apimachinery/blob/v0.32.0/third_party/forked/golang/LICENSE,BSD-3-Clause -k8s.io/apiserver,https://github.com/kubernetes/apiserver/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/client-go,https://github.com/kubernetes/client-go/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/component-base,https://github.com/kubernetes/component-base/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/klog/v2,https://github.com/kubernetes/klog/blob/v2.130.1/LICENSE,Apache-2.0 -k8s.io/kms,https://github.com/kubernetes/kms/blob/v0.32.0/LICENSE,Apache-2.0 -k8s.io/kube-aggregator/pkg/apis/apiregistration,https://github.com/kubernetes/kube-aggregator/blob/v0.31.1/LICENSE,Apache-2.0 -k8s.io/kube-openapi/pkg,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/LICENSE,Apache-2.0 -k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/pkg/internal/third_party/go-json-experiment/json/LICENSE,BSD-3-Clause -k8s.io/kube-openapi/pkg/internal/third_party/govalidator,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/pkg/internal/third_party/govalidator/LICENSE,MIT -k8s.io/kube-openapi/pkg/validation/errors,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/pkg/validation/errors/LICENSE,Apache-2.0 -k8s.io/kube-openapi/pkg/validation/spec,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/pkg/validation/spec/LICENSE,Apache-2.0 -k8s.io/kube-openapi/pkg/validation/strfmt,https://github.com/kubernetes/kube-openapi/blob/2c72e554b1e7/pkg/validation/strfmt/LICENSE,Apache-2.0 -k8s.io/utils,https://github.com/kubernetes/utils/blob/24370beab758/LICENSE,Apache-2.0 -k8s.io/utils/internal/third_party/forked/golang,https://github.com/kubernetes/utils/blob/24370beab758/internal/third_party/forked/golang/LICENSE,BSD-3-Clause -sigs.k8s.io/apiserver-network-proxy/konnectivity-client,https://github.com/kubernetes-sigs/apiserver-network-proxy/blob/konnectivity-client/v0.31.1/konnectivity-client/LICENSE,Apache-2.0 -sigs.k8s.io/controller-runtime,https://github.com/kubernetes-sigs/controller-runtime/blob/v0.19.0/LICENSE,Apache-2.0 -sigs.k8s.io/gateway-api,https://github.com/kubernetes-sigs/gateway-api/blob/v1.1.0/LICENSE,Apache-2.0 -sigs.k8s.io/json,https://github.com/kubernetes-sigs/json/blob/cfa47c3a1cc8/LICENSE,Apache-2.0 -sigs.k8s.io/json,https://github.com/kubernetes-sigs/json/blob/cfa47c3a1cc8/LICENSE,BSD-3-Clause -sigs.k8s.io/structured-merge-diff/v4,https://github.com/kubernetes-sigs/structured-merge-diff/blob/v4.5.0/LICENSE,Apache-2.0 -sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,MIT -sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,Apache-2.0 -sigs.k8s.io/yaml,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE,BSD-3-Clause -sigs.k8s.io/yaml/goyaml.v2,https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE,Apache-2.0 -software.sslmate.com/src/go-pkcs12,https://github.com/SSLMate/go-pkcs12/blob/v0.5.0/LICENSE,BSD-3-Clause +This LICENSES file is generated by the `licenses` module in makefile-modules[0]. 
+ +The licenses below the "---" are determined by the go-licenses tool[1]. + +The aim of this file is to collect the licenses of all dependencies, and provide +a single source of truth for licenses used by this project. + +## For Developers + +If CI reports that this file is out of date, you should be careful to check that the +new licenses are acceptable for this project before running `make generate-go-licenses` +to update this file. + +Acceptable licenses are those allowlisted by the CNCF[2]. + +You MUST NOT add any new dependencies whose licenses are not allowlisted by the CNCF, +or which do not have an explicit license exception[3]. + +## For Users + +If this file was included in a release artifact, it is a snapshot of the licenses of all dependencies at the time of the release. + +You can retrieve the actual license text by following these steps: + +1. Find the dependency name in this file +2. Go to the source code repository of this project, and go to the tag corresponding to this release. +3. Find the exact version of the dependency in the `go.mod` file +4. Search for the dependency at the correct version in the [Go package index](https://pkg.go.dev/). + +## Links + +[0]: https://github.com/cert-manager/makefile-modules/ +[1]: https://github.com/google/go-licenses +[2]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/policies-guidance/allowed-third-party-license-policy.md#cncf-allowlist-license-policy +[3]: https://github.com/cncf/foundation/blob/db4179134ebe7fa00b140a050c19147db808b6fa/license-exceptions/README.md + +--- + +cel.dev/expr,Apache-2.0 +cloud.google.com/go/auth,Apache-2.0 +cloud.google.com/go/auth/oauth2adapt,Apache-2.0 +cloud.google.com/go/compute/metadata,Apache-2.0 +github.com/Azure/azure-sdk-for-go/sdk/azcore,MIT +github.com/Azure/azure-sdk-for-go/sdk/azidentity,MIT +github.com/Azure/azure-sdk-for-go/sdk/internal,MIT +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dns/armdns,MIT +github.com/Azure/go-ntlmssp,MIT +github.com/AzureAD/microsoft-authentication-library-for-go/apps,MIT +github.com/Khan/genqlient/graphql,MIT +github.com/NYTimes/gziphandler,Apache-2.0 +github.com/Venafi/vcert/v5,Apache-2.0 +github.com/akamai/AkamaiOPEN-edgegrid-golang/v12/pkg,Apache-2.0 +github.com/antlr4-go/antlr/v4,BSD-3-Clause +github.com/aws/aws-sdk-go-v2,Apache-2.0 +github.com/aws/aws-sdk-go-v2/config,Apache-2.0 +github.com/aws/aws-sdk-go-v2/credentials,Apache-2.0 +github.com/aws/aws-sdk-go-v2/feature/ec2/imds,Apache-2.0 +github.com/aws/aws-sdk-go-v2/internal/configsources,Apache-2.0 +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2,Apache-2.0 +github.com/aws/aws-sdk-go-v2/internal/ini,Apache-2.0 +github.com/aws/aws-sdk-go-v2/internal/sync/singleflight,BSD-3-Clause +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/route53,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/sso,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/ssooidc,Apache-2.0 +github.com/aws/aws-sdk-go-v2/service/sts,Apache-2.0 +github.com/aws/smithy-go,Apache-2.0 +github.com/aws/smithy-go/internal/sync/singleflight,BSD-3-Clause +github.com/benbjohnson/clock,MIT +github.com/beorn7/perks/quantile,MIT +github.com/blang/semver/v4,MIT +github.com/cenkalti/backoff/v4,MIT +github.com/cenkalti/backoff/v5,MIT +github.com/cert-manager/cert-manager,Apache-2.0 +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/azuredns,MIT 
+github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/clouddns,MIT +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/cloudflare,MIT +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/route53,MIT +github.com/cert-manager/cert-manager/pkg/issuer/acme/dns/util,MIT +github.com/cert-manager/cert-manager/third_party/forked/acme,BSD-3-Clause +github.com/cespare/xxhash/v2,MIT +github.com/coreos/go-semver/semver,Apache-2.0 +github.com/coreos/go-systemd/v22,Apache-2.0 +github.com/davecgh/go-spew/spew,ISC +github.com/digitalocean/godo,MIT +github.com/digitalocean/godo,BSD-3-Clause +github.com/emicklei/go-restful/v3,MIT +github.com/evanphx/json-patch/v5,BSD-3-Clause +github.com/felixge/httpsnoop,MIT +github.com/fsnotify/fsnotify,BSD-3-Clause +github.com/fxamacker/cbor/v2,MIT +github.com/go-asn1-ber/asn1-ber,MIT +github.com/go-http-utils/headers,MIT +github.com/go-jose/go-jose/v4,Apache-2.0 +github.com/go-jose/go-jose/v4/json,BSD-3-Clause +github.com/go-ldap/ldap/v3,MIT +github.com/go-logr/logr,Apache-2.0 +github.com/go-logr/stdr,Apache-2.0 +github.com/go-logr/zapr,Apache-2.0 +github.com/go-openapi/jsonpointer,Apache-2.0 +github.com/go-openapi/jsonreference,Apache-2.0 +github.com/go-openapi/swag,Apache-2.0 +github.com/go-openapi/swag/jsonname,Apache-2.0 +github.com/go-ozzo/ozzo-validation/v4,MIT +github.com/gogo/protobuf,BSD-3-Clause +github.com/golang-jwt/jwt/v5,MIT +github.com/golang/protobuf/proto,BSD-3-Clause +github.com/golang/snappy,BSD-3-Clause +github.com/google/btree,Apache-2.0 +github.com/google/cel-go,Apache-2.0 +github.com/google/cel-go,BSD-3-Clause +github.com/google/certificate-transparency-go,Apache-2.0 +github.com/google/gnostic-models,Apache-2.0 +github.com/google/go-cmp/cmp,BSD-3-Clause +github.com/google/go-querystring/query,BSD-3-Clause +github.com/google/s2a-go,Apache-2.0 +github.com/google/uuid,BSD-3-Clause +github.com/googleapis/enterprise-certificate-proxy/client,Apache-2.0 +github.com/googleapis/gax-go/v2,BSD-3-Clause +github.com/gorilla/websocket,BSD-2-Clause +github.com/grpc-ecosystem/go-grpc-prometheus,Apache-2.0 +github.com/grpc-ecosystem/grpc-gateway/v2,BSD-3-Clause +github.com/hashicorp/errwrap,MPL-2.0 +github.com/hashicorp/go-cleanhttp,MPL-2.0 +github.com/hashicorp/go-hmac-drbg/hmacdrbg,MIT +github.com/hashicorp/go-multierror,MPL-2.0 +github.com/hashicorp/go-retryablehttp,MPL-2.0 +github.com/hashicorp/go-rootcerts,MPL-2.0 +github.com/hashicorp/go-secure-stdlib/cryptoutil,MPL-2.0 +github.com/hashicorp/go-secure-stdlib/parseutil,MPL-2.0 +github.com/hashicorp/go-secure-stdlib/strutil,MPL-2.0 +github.com/hashicorp/go-sockaddr,MPL-2.0 +github.com/hashicorp/hcl,MPL-2.0 +github.com/hashicorp/vault/api,MPL-2.0 +github.com/hashicorp/vault/sdk/helper,MPL-2.0 +github.com/josharian/intern,MIT +github.com/json-iterator/go,MIT +github.com/kylelemons/godebug,Apache-2.0 +github.com/mailru/easyjson,MIT +github.com/miekg/dns,BSD-3-Clause +github.com/mitchellh/go-homedir,MIT +github.com/mitchellh/mapstructure,MIT +github.com/modern-go/concurrent,Apache-2.0 +github.com/modern-go/reflect2,Apache-2.0 +github.com/munnerz/goautoneg,BSD-3-Clause +github.com/nrdcg/goacmedns,MIT +github.com/pavlo-v-chernykh/keystore-go/v4,MIT +github.com/pierrec/lz4,BSD-3-Clause +github.com/pkg/browser,BSD-2-Clause +github.com/pmezard/go-difflib/difflib,BSD-3-Clause +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil,BSD-3-Clause +github.com/prometheus/client_golang/prometheus,Apache-2.0 +github.com/prometheus/client_model/go,Apache-2.0 
+github.com/prometheus/common,Apache-2.0 +github.com/prometheus/procfs,Apache-2.0 +github.com/ryanuber/go-glob,MIT +github.com/sosodev/duration,MIT +github.com/spf13/cobra,Apache-2.0 +github.com/spf13/pflag,BSD-3-Clause +github.com/stoewer/go-strcase,MIT +github.com/stretchr/objx,MIT +github.com/stretchr/testify,MIT +github.com/vektah/gqlparser/v2,MIT +github.com/x448/float16,MIT +github.com/youmark/pkcs8,MIT +go.etcd.io/etcd/api/v3,Apache-2.0 +go.etcd.io/etcd/client/pkg/v3,Apache-2.0 +go.etcd.io/etcd/client/v3,Apache-2.0 +go.opentelemetry.io/auto/sdk,Apache-2.0 +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc,Apache-2.0 +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp,Apache-2.0 +go.opentelemetry.io/otel,Apache-2.0 +go.opentelemetry.io/otel/exporters/otlp/otlptrace,Apache-2.0 +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc,Apache-2.0 +go.opentelemetry.io/otel/metric,Apache-2.0 +go.opentelemetry.io/otel/sdk,Apache-2.0 +go.opentelemetry.io/otel/trace,Apache-2.0 +go.opentelemetry.io/proto/otlp,Apache-2.0 +go.uber.org/multierr,MIT +go.uber.org/ratelimit,MIT +go.uber.org/zap,MIT +go.yaml.in/yaml/v2,Apache-2.0 +go.yaml.in/yaml/v3,MIT +golang.org/x/crypto,BSD-3-Clause +golang.org/x/exp/slices,BSD-3-Clause +golang.org/x/net,BSD-3-Clause +golang.org/x/oauth2,BSD-3-Clause +golang.org/x/sync,BSD-3-Clause +golang.org/x/sys,BSD-3-Clause +golang.org/x/term,BSD-3-Clause +golang.org/x/text,BSD-3-Clause +golang.org/x/time/rate,BSD-3-Clause +gomodules.xyz/jsonpatch/v2,Apache-2.0 +google.golang.org/api,BSD-3-Clause +google.golang.org/api/internal/third_party/uritemplates,BSD-3-Clause +google.golang.org/genproto/googleapis/api,Apache-2.0 +google.golang.org/genproto/googleapis/rpc,Apache-2.0 +google.golang.org/grpc,Apache-2.0 +google.golang.org/protobuf,BSD-3-Clause +gopkg.in/evanphx/json-patch.v4,BSD-3-Clause +gopkg.in/inf.v0,BSD-3-Clause +gopkg.in/ini.v1,Apache-2.0 +gopkg.in/natefinch/lumberjack.v2,MIT +gopkg.in/yaml.v2,Apache-2.0 +gopkg.in/yaml.v3,MIT +k8s.io/api,Apache-2.0 +k8s.io/apiextensions-apiserver/pkg,Apache-2.0 +k8s.io/apimachinery/pkg,Apache-2.0 +k8s.io/apimachinery/third_party/forked/golang,BSD-3-Clause +k8s.io/apiserver,Apache-2.0 +k8s.io/client-go,Apache-2.0 +k8s.io/component-base,Apache-2.0 +k8s.io/klog/v2,Apache-2.0 +k8s.io/kms,Apache-2.0 +k8s.io/kube-aggregator/pkg/apis/apiregistration,Apache-2.0 +k8s.io/kube-openapi/pkg,Apache-2.0 +k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json,BSD-3-Clause +k8s.io/kube-openapi/pkg/internal/third_party/govalidator,MIT +k8s.io/kube-openapi/pkg/validation/errors,Apache-2.0 +k8s.io/kube-openapi/pkg/validation/spec,Apache-2.0 +k8s.io/kube-openapi/pkg/validation/strfmt,Apache-2.0 +k8s.io/utils,Apache-2.0 +k8s.io/utils/internal/third_party/forked/golang,BSD-3-Clause +sigs.k8s.io/apiserver-network-proxy/konnectivity-client,Apache-2.0 +sigs.k8s.io/controller-runtime,Apache-2.0 +sigs.k8s.io/gateway-api,Apache-2.0 +sigs.k8s.io/json,Apache-2.0 +sigs.k8s.io/json,BSD-3-Clause +sigs.k8s.io/randfill,Apache-2.0 +sigs.k8s.io/structured-merge-diff/v6,Apache-2.0 +sigs.k8s.io/yaml,MIT +sigs.k8s.io/yaml,Apache-2.0 +sigs.k8s.io/yaml,BSD-3-Clause +software.sslmate.com/src/go-pkcs12,BSD-3-Clause diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go index 92b6583d6..5ba5e8f1c 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go +++ 
b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/doc.go @@ -16,5 +16,6 @@ limitations under the License. // Package v1 is the v1 version of the API. // +k8s:deepcopy-gen=package,register +// +k8s:openapi-gen=true // +groupName=acme.cert-manager.io package v1 diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go index 34bae15b8..dc3bb1b37 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_challenge.go @@ -25,15 +25,14 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion - -// Challenge is a type to represent a Challenge request with an ACME server -// +k8s:openapi-gen=true // +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state" // +kubebuilder:printcolumn:name="Domain",type="string",JSONPath=".spec.dnsName" // +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.reason",description="",priority=1 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Namespaced,categories={cert-manager,cert-manager-acme} // +kubebuilder:subresource:status -// +kubebuilder:resource:path=challenges + +// Challenge is a type to represent a Challenge request with an ACME server type Challenge struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata"` @@ -98,7 +97,7 @@ type ChallengeSpec struct { // If the Issuer does not exist, processing will be retried. // If the Issuer is not an 'ACME' Issuer, an error will be returned and the // Challenge will be marked as failed. - IssuerRef cmmeta.ObjectReference `json:"issuerRef"` + IssuerRef cmmeta.IssuerReference `json:"issuerRef"` } // The type of ACME challenge. Only HTTP-01 and DNS-01 are supported. diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go index 54cb4b97e..009b1abe8 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_issuer.go @@ -96,6 +96,7 @@ type ACMEIssuer struct { // from an ACME server. // For more information, see: https://cert-manager.io/docs/configuration/acme/ // +optional + // +listType=atomic Solvers []ACMEChallengeSolver `json:"solvers,omitempty"` // Enables or disables generating a new ACME account key. @@ -196,6 +197,7 @@ type CertificateDNSNameSelector struct { // If neither has more matches, the solver defined earlier in the list // will be selected. // +optional + // +listType=atomic DNSNames []string `json:"dnsNames,omitempty"` // List of DNSZones that this solver will be used to solve. @@ -208,6 +210,7 @@ type CertificateDNSNameSelector struct { // If neither has more matches, the solver defined earlier in the list // will be selected. 
// +optional + // +listType=atomic DNSZones []string `json:"dnsZones,omitempty"` } @@ -290,6 +293,8 @@ type ACMEChallengeSolverHTTP01GatewayHTTPRoute struct { // cert-manager needs to know which parentRefs should be used when creating // the HTTPRoute. Usually, the parentRef references a Gateway. See: // https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways + // +optional + // +listType=atomic ParentRefs []gwapi.ParentReference `json:"parentRefs,omitempty"` // Optional pod template used to configure the ACME challenge solver pods @@ -336,6 +341,7 @@ type ACMEChallengeSolverHTTP01IngressPodSpec struct { // If specified, the pod's tolerations. // +optional + // +listType=atomic Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // If specified, the pod's priorityClassName. @@ -348,11 +354,24 @@ type ACMEChallengeSolverHTTP01IngressPodSpec struct { // If specified, the pod's imagePullSecrets // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchMergeKey:"name" patchStrategy:"merge"` // If specified, the pod's security context // +optional SecurityContext *ACMEChallengeSolverHTTP01IngressPodSecurityContext `json:"securityContext,omitempty"` + + // If specified, the pod's resource requirements. + // These values override the global resource configuration flags. + // Note that when only specifying resource limits, ensure they are greater than or equal + // to the corresponding global resource requests configured via controller flags + // (--acme-http01-solver-resource-request-cpu, --acme-http01-solver-resource-request-memory). + // Kubernetes will reject pod creation if limits are lower than requests, causing challenge failures. + // +optional + Resources *ACMEChallengeSolverHTTP01IngressPodResources `json:"resources,omitempty"` } type ACMEChallengeSolverHTTP01IngressTemplate struct { @@ -464,6 +483,7 @@ type ACMEChallengeSolverHTTP01IngressPodSecurityContext struct { // even if they are not included in this list. // Note that this field cannot be set when spec.os.name is windows. // +optional + // +listType=atomic SupplementalGroups []int64 `json:"supplementalGroups,omitempty"` // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume @@ -481,6 +501,7 @@ type ACMEChallengeSolverHTTP01IngressPodSecurityContext struct { // sysctls (by the container runtime) might fail to launch. // Note that this field cannot be set when spec.os.name is windows. // +optional + // +listType=atomic Sysctls []corev1.Sysctl `json:"sysctls,omitempty"` // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume // before being exposed inside Pod. This field will only apply to @@ -497,6 +518,21 @@ type ACMEChallengeSolverHTTP01IngressPodSecurityContext struct { SeccompProfile *corev1.SeccompProfile `json:"seccompProfile,omitempty"` } +// ACMEChallengeSolverHTTP01IngressPodResources defines resource requirements for ACME HTTP01 solver pods. +// To keep API surface essential, this trims down the 'corev1.ResourceRequirements' type to only include the Requests and Limits fields. +type ACMEChallengeSolverHTTP01IngressPodResources struct { + // Limits describes the maximum amount of compute resources allowed. 
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Limits corev1.ResourceList `json:"limits,omitempty"` + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to the global values configured via controller flags. Requests cannot exceed Limits. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Requests corev1.ResourceList `json:"requests,omitempty"` +} + // CNAMEStrategy configures how the DNS01 provider should handle CNAME records // when found in DNS zones. // By default, the None strategy will be applied (i.e. do not follow CNAMEs). @@ -658,6 +694,7 @@ type ServiceAccountRef struct { // and name is always included. // If unset the audience defaults to `sts.amazonaws.com`. // +optional + // +listType=atomic TokenAudiences []string `json:"audiences,omitempty"` } @@ -764,8 +801,22 @@ type ACMEIssuerDNS01ProviderRFC2136 struct { // ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``. // +optional TSIGAlgorithm string `json:"tsigAlgorithm,omitempty"` + + // Protocol to use for dynamic DNS update queries. Valid values are (case-sensitive) ``TCP`` and ``UDP``; ``UDP`` (default). + // +optional + Protocol RFC2136UpdateProtocol `json:"protocol,omitempty"` } +// +kubebuilder:validation:Enum=TCP;UDP +type RFC2136UpdateProtocol string + +const ( + // RFC2136UpdateProtocolTCP utilizes TCP to update queries. + RFC2136UpdateProtocolTCP RFC2136UpdateProtocol = "TCP" + // RFC2136UpdateProtocolUDP utilizes UDP to update queries. + RFC2136UpdateProtocolUDP RFC2136UpdateProtocol = "UDP" +) + // ACMEIssuerDNS01ProviderWebhook specifies configuration for a webhook DNS01 // provider, including where to POST ChallengePayload resources. type ACMEIssuerDNS01ProviderWebhook struct { diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go index c03a6a903..e7e199c31 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/types_order.go @@ -25,9 +25,14 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state" +// +kubebuilder:printcolumn:name="Issuer",type="string",JSONPath=".spec.issuerRef.name",priority=1 +// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.reason",description="",priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Namespaced,categories={cert-manager,cert-manager-acme} +// +kubebuilder:subresource:status // Order is a type to represent an Order with an ACME server -// +k8s:openapi-gen=true type Order struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata"` @@ -58,7 +63,7 @@ type OrderSpec struct { // If the Issuer does not exist, processing will be retried. 
// If the Issuer is not an 'ACME' Issuer, an error will be returned and the // Order will be marked as failed. - IssuerRef cmmeta.ObjectReference `json:"issuerRef"` + IssuerRef cmmeta.IssuerReference `json:"issuerRef"` // CommonName is the common name as specified on the DER encoded CSR. // If specified, this value must also be present in `dnsNames` or `ipAddresses`. @@ -69,13 +74,15 @@ type OrderSpec struct { // DNSNames is a list of DNS names that should be included as part of the Order // validation process. // This field must match the corresponding field on the DER encoded CSR. - //+optional + // +optional + // +listType=atomic DNSNames []string `json:"dnsNames,omitempty"` // IPAddresses is a list of IP addresses that should be included as part of the Order // validation process. // This field must match the corresponding field on the DER encoded CSR. // +optional + // +listType=atomic IPAddresses []string `json:"ipAddresses,omitempty"` // Duration is the duration for the not after date for the requested certificate. @@ -106,6 +113,7 @@ type OrderStatus struct { // authorizations must be completed in order to validate the DNS names // specified on the Order. // +optional + // +listType=atomic Authorizations []ACMEAuthorization `json:"authorizations,omitempty"` // Certificate is a copy of the PEM encoded certificate for this Order. @@ -166,6 +174,7 @@ type ACMEAuthorization struct { // name and an appropriate Challenge resource will be created to perform // the ACME challenge process. // +optional + // +listType=atomic Challenges []ACMEChallenge `json:"challenges,omitempty"` } diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go index 09f27f5cc..e1b4500da 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/acme/v1/zz_generated.deepcopy.go @@ -321,6 +321,36 @@ func (in *ACMEChallengeSolverHTTP01IngressPodObjectMeta) DeepCopy() *ACMEChallen return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACMEChallengeSolverHTTP01IngressPodResources) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressPodResources) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACMEChallengeSolverHTTP01IngressPodResources. +func (in *ACMEChallengeSolverHTTP01IngressPodResources) DeepCopy() *ACMEChallengeSolverHTTP01IngressPodResources { + if in == nil { + return nil + } + out := new(ACMEChallengeSolverHTTP01IngressPodResources) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ACMEChallengeSolverHTTP01IngressPodSecurityContext) DeepCopyInto(out *ACMEChallengeSolverHTTP01IngressPodSecurityContext) { *out = *in @@ -414,6 +444,11 @@ func (in *ACMEChallengeSolverHTTP01IngressPodSpec) DeepCopyInto(out *ACMEChallen *out = new(ACMEChallengeSolverHTTP01IngressPodSecurityContext) (*in).DeepCopyInto(*out) } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(ACMEChallengeSolverHTTP01IngressPodResources) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go index 348211c68..3830f7af3 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/doc.go @@ -16,6 +16,7 @@ limitations under the License. // Package v1 is the v1 version of the API. // +k8s:deepcopy-gen=package,register +// +k8s:openapi-gen=true // +groupName=cert-manager.io // +groupGoName=Certmanager package v1 diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go index 3208068c9..bc5475a32 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificate.go @@ -27,6 +27,13 @@ import ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].status` +// +kubebuilder:printcolumn:name="Secret",type="string",JSONPath=`.spec.secretName` +// +kubebuilder:printcolumn:name="Issuer",type="string",JSONPath=`.spec.issuerRef.name`,priority=1 +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].message`,priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Namespaced,shortName={cert,certs},categories=cert-manager +// +kubebuilder:subresource:status // A Certificate resource should be created to ensure an up to date and signed // X.509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. @@ -200,14 +207,17 @@ type CertificateSpec struct { // Requested DNS subject alternative names. // +optional + // +listType=atomic DNSNames []string `json:"dnsNames,omitempty"` // Requested IP address subject alternative names. // +optional + // +listType=atomic IPAddresses []string `json:"ipAddresses,omitempty"` // Requested URI subject alternative names. // +optional + // +listType=atomic URIs []string `json:"uris,omitempty"` // `otherNames` is an escape hatch for SAN that allows any type. We currently restrict the support to string like otherNames, cf RFC 5280 p 37 @@ -215,10 +225,12 @@ type CertificateSpec struct { // Most commonly this would be UPN set with oid: 1.3.6.1.4.1.311.20.2.3 // You should ensure that any OID passed is valid for the UTF8String type as we do not explicitly validate this. 
// +optional + // +listType=atomic OtherNames []OtherName `json:"otherNames,omitempty"` // Requested email subject alternative names. // +optional + // +listType=atomic EmailAddresses []string `json:"emailAddresses,omitempty"` // Name of the Secret resource that will be automatically created and @@ -245,7 +257,7 @@ type CertificateSpec struct { // from any namespace. // // The `name` field of the reference must always be specified. - IssuerRef cmmeta.ObjectReference `json:"issuerRef"` + IssuerRef cmmeta.IssuerReference `json:"issuerRef"` // Requested basic constraints isCA value. // The isCA value is used to set the `isCA` field on the created CertificateRequest @@ -264,6 +276,7 @@ type CertificateSpec struct { // // If unset, defaults to `digital signature` and `key encipherment`. // +optional + // +listType=atomic Usages []KeyUsage `json:"usages,omitempty"` // Private key options. These include the key algorithm and size, the used @@ -299,6 +312,7 @@ type CertificateSpec struct { // Defines extra output formats of the private key and signed certificate chain // to be written to this Certificate's target Secret. // +optional + // +listType=atomic AdditionalOutputFormats []CertificateAdditionalOutputFormat `json:"additionalOutputFormats,omitempty"` // x.509 certificate NameConstraint extension which MUST NOT be used in a non-CA certificate. @@ -439,24 +453,31 @@ type CertificateAdditionalOutputFormat struct { type X509Subject struct { // Organizations to be used on the Certificate. // +optional + // +listType=atomic Organizations []string `json:"organizations,omitempty"` // Countries to be used on the Certificate. // +optional + // +listType=atomic Countries []string `json:"countries,omitempty"` // Organizational Units to be used on the Certificate. // +optional + // +listType=atomic OrganizationalUnits []string `json:"organizationalUnits,omitempty"` // Cities to be used on the Certificate. // +optional + // +listType=atomic Localities []string `json:"localities,omitempty"` // State/Provinces to be used on the Certificate. // +optional + // +listType=atomic Provinces []string `json:"provinces,omitempty"` // Street addresses to be used on the Certificate. // +optional + // +listType=atomic StreetAddresses []string `json:"streetAddresses,omitempty"` // Postal codes to be used on the Certificate. // +optional + // +listType=atomic PostalCodes []string `json:"postalCodes,omitempty"` // Serial number to be used on the Certificate. // +optional @@ -568,9 +589,9 @@ const ( type CertificateStatus struct { // List of status conditions to indicate the status of certificates. // Known condition types are `Ready` and `Issuing`. + // +optional // +listType=map // +listMapKey=type - // +optional Conditions []CertificateCondition `json:"conditions,omitempty"` // LastFailureTime is set only if the latest issuance for this @@ -729,18 +750,22 @@ type NameConstraintItem struct { // DNSDomains is a list of DNS domains that are permitted or excluded. // // +optional + // +listType=atomic DNSDomains []string `json:"dnsDomains,omitempty"` // IPRanges is a list of IP Ranges that are permitted or excluded. // This should be a valid CIDR notation. // // +optional + // +listType=atomic IPRanges []string `json:"ipRanges,omitempty"` // EmailAddresses is a list of Email Addresses that are permitted or excluded. // // +optional + // +listType=atomic EmailAddresses []string `json:"emailAddresses,omitempty"` // URIDomains is a list of URI domains that are permitted or excluded. 
// // +optional + // +listType=atomic URIDomains []string `json:"uriDomains,omitempty"` } diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go index 8f31d84c0..a948f1129 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_certificaterequest.go @@ -45,6 +45,15 @@ const ( // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Approved",type="string",JSONPath=`.status.conditions[?(@.type == "Approved")].status` +// +kubebuilder:printcolumn:name="Denied",type="string",JSONPath=`.status.conditions[?(@.type == "Denied")].status` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].status` +// +kubebuilder:printcolumn:name="Issuer",type="string",JSONPath=`.spec.issuerRef.name` +// +kubebuilder:printcolumn:name="Requester",type="string",JSONPath=`.spec.username` +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].message`,priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Namespaced,shortName={cr,crs},categories=cert-manager +// +kubebuilder:subresource:status // A CertificateRequest is used to request a signed certificate from one of the // configured issuers. @@ -55,7 +64,6 @@ const ( // // A CertificateRequest is a one-shot resource, meaning it represents a single // point in time request for a certificate and cannot be re-used. -// +k8s:openapi-gen=true type CertificateRequest struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -110,7 +118,7 @@ type CertificateRequestSpec struct { // from any namespace. // // The `name` field of the reference must always be specified. - IssuerRef cmmeta.ObjectReference `json:"issuerRef"` + IssuerRef cmmeta.IssuerReference `json:"issuerRef"` // The PEM-encoded X.509 certificate signing request to be submitted to the // issuer for signing. @@ -143,6 +151,7 @@ type CertificateRequestSpec struct { // // If unset, defaults to `digital signature` and `key encipherment`. // +optional + // +listType=atomic Usages []KeyUsage `json:"usages,omitempty"` // Username contains the name of the user that created the CertificateRequest. @@ -155,8 +164,8 @@ type CertificateRequestSpec struct { UID string `json:"uid,omitempty"` // Groups contains group membership of the user that created the CertificateRequest. // Populated by the cert-manager webhook on creation and immutable. - // +listType=atomic // +optional + // +listType=atomic Groups []string `json:"groups,omitempty"` // Extra contains extra attributes of the user that created the CertificateRequest. // Populated by the cert-manager webhook on creation and immutable. @@ -169,9 +178,9 @@ type CertificateRequestSpec struct { type CertificateRequestStatus struct { // List of status conditions to indicate the status of a CertificateRequest. 
// Known condition types are `Ready`, `InvalidRequest`, `Approved` and `Denied`. + // +optional // +listType=map // +listMapKey=type - // +optional Conditions []CertificateRequestCondition `json:"conditions,omitempty"` // The PEM encoded X.509 certificate resulting from the certificate diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go index 90a55fcea..1cbd93f95 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1/types_issuer.go @@ -25,9 +25,13 @@ import ( // +genclient // +genclient:nonNamespaced -// +k8s:openapi-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].status` +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].message`,priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Cluster,shortName=ciss,categories=cert-manager +// +kubebuilder:subresource:status // A ClusterIssuer represents a certificate issuing authority which can be // referenced as part of `issuerRef` fields. @@ -57,9 +61,13 @@ type ClusterIssuerList struct { } // +genclient -// +k8s:openapi-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].status` +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=`.status.conditions[?(@.type == "Ready")].message`,priority=1 +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=`.metadata.creationTimestamp`,description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC." +// +kubebuilder:resource:scope=Namespaced,shortName=iss,categories=cert-manager +// +kubebuilder:subresource:status // An Issuer represents a certificate issuing authority which can be // referenced as part of `issuerRef` fields. @@ -188,6 +196,7 @@ type SelfSignedIssuer struct { // the location of the CRL from which the revocation of this certificate can be checked. // If not set certificate will be issued without CDP. Values are strings. // +optional + // +listType=atomic CRLDistributionPoints []string `json:"crlDistributionPoints,omitempty"` } @@ -348,6 +357,7 @@ type ServiceAccountRef struct { // TokenAudiences is an optional list of extra audiences to include in the token passed to Vault. The default token // consisting of the issuer's namespace and name is always included. // +optional + // +listType=atomic TokenAudiences []string `json:"audiences,omitempty"` } @@ -360,6 +370,7 @@ type CAIssuer struct { // the location of the CRL from which the revocation of this certificate can be checked. 
// If not set, certificates will be issued without distribution points set. // +optional + // +listType=atomic CRLDistributionPoints []string `json:"crlDistributionPoints,omitempty"` // The OCSP server list is an X.509 v3 extension that defines a list of @@ -368,12 +379,14 @@ type CAIssuer struct { // certificate will be issued with no OCSP servers set. For example, an // OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". // +optional + // +listType=atomic OCSPServers []string `json:"ocspServers,omitempty"` // IssuingCertificateURLs is a list of URLs which this issuer should embed into certificates // it creates. See https://www.rfc-editor.org/rfc/rfc5280#section-4.2.2.1 for more details. // As an example, such a URL might be "http://ca.domain.com/ca.crt". // +optional + // +listType=atomic IssuingCertificateURLs []string `json:"issuingCertificateURLs,omitempty"` } @@ -381,9 +394,9 @@ type CAIssuer struct { type IssuerStatus struct { // List of status conditions to indicate the status of a CertificateRequest. // Known condition types are `Ready`. + // +optional // +listType=map // +listMapKey=type - // +optional Conditions []IssuerCondition `json:"conditions,omitempty"` // ACME specific status options. diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go index f391663af..378fecfb2 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/doc.go @@ -14,8 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +groupName=meta.cert-manager.io - // Package meta contains meta types for cert-manager APIs package meta diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go index 9a673685d..7b5d3d1fb 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/doc.go @@ -16,6 +16,6 @@ limitations under the License. // Package v1 contains meta types for cert-manager APIs // +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true // +gencrdrefdocs:force -// +groupName=meta.cert-manager.io package v1 diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go index 80723a6c0..2b294e1a9 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/types.go @@ -48,18 +48,24 @@ type LocalObjectReference struct { Name string `json:"name"` } -// ObjectReference is a reference to an object with a given name, kind and group. -type ObjectReference struct { - // Name of the resource being referred to. +// IssuerReference is a reference to a certificate issuer object with a given name, kind and group. +type IssuerReference struct { + // Name of the issuer being referred to. Name string `json:"name"` - // Kind of the resource being referred to. + // Kind of the issuer being referred to. + // Defaults to 'Issuer'. // +optional Kind string `json:"kind,omitempty"` - // Group of the resource being referred to. + // Group of the issuer being referred to. + // Defaults to 'cert-manager.io'. 
// +optional Group string `json:"group,omitempty"` } +// ObjectReference is a reference to an object with a given name, kind and group. +// Deprecated: Use IssuerReference instead. +type ObjectReference = IssuerReference + // A reference to a specific 'key' within a Secret resource. // In some instances, `key` is a required field. type SecretKeySelector struct { diff --git a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go index 9fa10e5e6..0d4af0708 100644 --- a/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/cert-manager/cert-manager/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -22,33 +22,33 @@ limitations under the License. package v1 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { +func (in *IssuerReference) DeepCopyInto(out *IssuerReference) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. -func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IssuerReference. +func (in *IssuerReference) DeepCopy() *IssuerReference { if in == nil { return nil } - out := new(LocalObjectReference) + out := new(IssuerReference) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. -func (in *ObjectReference) DeepCopy() *ObjectReference { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { if in == nil { return nil } - out := new(ObjectReference) + out := new(LocalObjectReference) in.DeepCopyInto(out) return out } diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index 907553466..f20a57a35 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.7.29+unknown" + Version = "1.7.30+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. 
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/.gitignore b/vendor/github.com/cucumber/gherkin/go/v26/.gitignore
new file mode 100644
index 000000000..7b0ee7aeb
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/.gitignore
@@ -0,0 +1,17 @@
+.built
+.compared
+.deps
+.dist
+.dist-compressed
+.go-get
+.gofmt
+.linted
+.tested*
+acceptance/
+bin/
+dist/
+dist_compressed/
+*.bin
+*.iml
+# upx dist/cucumber-gherkin-openbsd-386 fails with a core dump
+core.*.!usr!bin!upx-ucl
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/LICENSE b/vendor/github.com/cucumber/gherkin/go/v26/LICENSE
new file mode 100644
index 000000000..29e136102
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) Cucumber Ltd, Gaspar Nagy, Björn Rasmusson, Peter Sergeant
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/cucumber/gherkin/go/v26/Makefile b/vendor/github.com/cucumber/gherkin/go/v26/Makefile
new file mode 100644
index 000000000..d7d420854
--- /dev/null
+++ b/vendor/github.com/cucumber/gherkin/go/v26/Makefile
@@ -0,0 +1,83 @@
+SHELL := /usr/bin/env bash
+
+GHERKIN_LANGUAGES_JSON = dialects_builtin.go
+GHERKIN_PARSER = parser.go
+GHERKIN_RAZOR = parser.go.razor
+SOURCE_FILES = $(shell find . -name "*.go" | grep -v $(GHERKIN_PARSER))
-name "*.go" | grep -v $(GHERKIN_PARSER)) + +GHERKIN = bin/gherkin +GHERKIN_GENERATE_TOKENS = bin/gherkin-generate-tokens + +GOOD_FEATURE_FILES = $(shell find ../testdata/good -name "*.feature") +BAD_FEATURE_FILES = $(shell find ../testdata/bad -name "*.feature") + +TOKENS = $(patsubst ../testdata/%,acceptance/testdata/%.tokens,$(GOOD_FEATURE_FILES)) +ASTS = $(patsubst ../testdata/%,acceptance/testdata/%.ast.ndjson,$(GOOD_FEATURE_FILES)) +PICKLES = $(patsubst ../testdata/%,acceptance/testdata/%.pickles.ndjson,$(GOOD_FEATURE_FILES)) +SOURCES = $(patsubst ../testdata/%,acceptance/testdata/%.source.ndjson,$(GOOD_FEATURE_FILES)) +ERRORS = $(patsubst ../testdata/%,acceptance/testdata/%.errors.ndjson,$(BAD_FEATURE_FILES)) + +.DEFAULT_GOAL = help + +help: ## Show this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \n\nWhere is one of:\n"} /^[$$()% a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +generate: $(GHERKIN_PARSER) ## Generate gherkin parser files + +clean-generate: ## Remove generated Gherkin parser files ## Generate gherkin parser files + rm -f $(GHERKIN_PARSER) + +copy-gherkin-languages: $(GHERKIN_LANGUAGES_JSON) ## Copy gherkin-languages.json and/or generate derived files + +clean-gherkin-languages: ## Remove gherkin-languages.json and any derived files + rm -f $(GHERKIN_LANGUAGES_JSON) + +clean: ## Remove all build artifacts and files generated by the acceptance tests + rm -rf .built + rm -rf acceptance + rm -rf bin + +.DELETE_ON_ERROR: + +acceptance: .built $(TOKENS) $(ASTS) $(PICKLES) $(ERRORS) $(SOURCES) ## Build acceptance test dir and compare results with reference + +.built: bin/gherkin-generate-tokens bin/gherkin + touch $@ + +bin/gherkin-generate-tokens: + go build -o $@ ./gherkin-generate-tokens + +bin/gherkin: + go build -o $@ -a ./main + +dialects_builtin.go: ../gherkin-languages.json dialects_builtin.go.jq + cat $< | jq --sort-keys --from-file dialects_builtin.go.jq --raw-output --compact-output > $@ + +$(GHERKIN_PARSER): $(GHERKIN_RAZOR) ../gherkin.berp + berp -g ../gherkin.berp -t $< -o $@ --noBOM + gofmt -w $@ + +acceptance/testdata/%.tokens: ../testdata/% ../testdata/%.tokens + mkdir -p $(@D) + $(GHERKIN_GENERATE_TOKENS) $< > $@ + diff --unified $<.tokens $@ + +acceptance/testdata/%.ast.ndjson: ../testdata/% ../testdata/%.ast.ndjson + mkdir -p $(@D) + $(GHERKIN) --no-source --no-pickles --predictable-ids $< | jq --sort-keys --compact-output "." > $@ + diff --unified <(jq "." $<.ast.ndjson) <(jq "." $@) + +acceptance/testdata/%.pickles.ndjson: ../testdata/% ../testdata/%.pickles.ndjson + mkdir -p $(@D) + $(GHERKIN) --no-source --no-ast --predictable-ids $< | jq --sort-keys --compact-output "." > $@ + diff --unified <(jq "." $<.pickles.ndjson) <(jq "." $@) + +acceptance/testdata/%.source.ndjson: ../testdata/% ../testdata/%.source.ndjson + mkdir -p $(@D) + $(GHERKIN) --no-ast --no-pickles --predictable-ids $< | jq --sort-keys --compact-output "." > $@ + diff --unified <(jq "." $<.source.ndjson) <(jq "." $@) + +acceptance/testdata/%.errors.ndjson: ../testdata/% ../testdata/%.errors.ndjson + mkdir -p $(@D) + $(GHERKIN) --no-source --predictable-ids $< | jq --sort-keys --compact-output "." > $@ + diff --unified <(jq "." $<.errors.ndjson) <(jq "." 
$@) diff --git a/vendor/github.com/cucumber/gherkin/go/v26/README.md b/vendor/github.com/cucumber/gherkin/go/v26/README.md new file mode 100644 index 000000000..cdaba03c9 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/README.md @@ -0,0 +1,37 @@ +# Gherkin for Go + +[![GoDoc](https://pkg.go.dev/github.com/cucumber/gherkin/go?status.svg)](http://godoc.org/github.com/cucumber/gherkin/go) + +Gherkin parser/compiler for Go. Please see [Gherkin](https://github.com/cucumber/gherkin) for details. + +## Building + +You need Go installed (obviously). You also need to make sure your `PATH` +points to where Go installs packages: + +```bash +# Add go bin to path +export PATH=$(go env GOPATH)/bin:${PATH} +``` + +Now build it: + +``` +make .dist +``` + +You should have cross-compiled binaries in `./dist/`. + +## Compress binaries + +You need [upx](https://upx.github.io/) installed. + +``` +make .dist +make .dist-compressed +``` + +Your `./dist_compressed/` directory should now have compressed binaries. +Compression fails for some binaries, so you likely won't have a full set. + +The build copies the successfully compressed binaries back to `./dist/`. diff --git a/vendor/github.com/cucumber/gherkin/go/v26/astbuilder.go b/vendor/github.com/cucumber/gherkin/go/v26/astbuilder.go new file mode 100644 index 000000000..54853d02b --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/astbuilder.go @@ -0,0 +1,453 @@ +package gherkin + +import ( + "github.com/cucumber/messages/go/v21" + "strings" +) + +type AstBuilder interface { + Builder + GetGherkinDocument() *messages.GherkinDocument +} + +type astBuilder struct { + stack []*astNode + comments []*messages.Comment + newId func() string +} + +func (t *astBuilder) Reset() { + t.comments = []*messages.Comment{} + t.stack = []*astNode{} + t.push(newAstNode(RuleTypeNone)) +} + +func (t *astBuilder) GetGherkinDocument() *messages.GherkinDocument { + res := t.currentNode().getSingle(RuleTypeGherkinDocument, nil) + if val, ok := res.(*messages.GherkinDocument); ok { + return val + } + return nil +} + +type astNode struct { + ruleType RuleType + subNodes map[RuleType][]interface{} +} + +func (a *astNode) add(rt RuleType, obj interface{}) { + a.subNodes[rt] = append(a.subNodes[rt], obj) +} + +func (a *astNode) getSingle(rt RuleType, defaultValue interface{}) interface{} { + if val, ok := a.subNodes[rt]; ok { + for i := range val { + return val[i] + } + } + return defaultValue +} + +func (a *astNode) getItems(rt RuleType) []interface{} { + var res []interface{} + if val, ok := a.subNodes[rt]; ok { + for i := range val { + res = append(res, val[i]) + } + } + return res +} + +func (a *astNode) getToken(tt TokenType) *Token { + if val, ok := a.getSingle(tt.RuleType(), nil).(*Token); ok { + return val + } + return nil +} + +func (a *astNode) getTokens(tt TokenType) []*Token { + var items = a.getItems(tt.RuleType()) + var tokens []*Token + for i := range items { + if val, ok := items[i].(*Token); ok { + tokens = append(tokens, val) + } + } + return tokens +} + +func (t *astBuilder) currentNode() *astNode { + if len(t.stack) > 0 { + return t.stack[len(t.stack)-1] + } + return nil +} + +func newAstNode(rt RuleType) *astNode { + return &astNode{ + ruleType: rt, + subNodes: make(map[RuleType][]interface{}), + } +} + +func NewAstBuilder(newId func() string) AstBuilder { + builder := new(astBuilder) + builder.newId = newId + builder.comments = []*messages.Comment{} + builder.push(newAstNode(RuleTypeNone)) + return builder +} + +func (t *astBuilder) push(n 
*astNode) { + t.stack = append(t.stack, n) +} + +func (t *astBuilder) pop() *astNode { + x := t.stack[len(t.stack)-1] + t.stack = t.stack[:len(t.stack)-1] + return x +} + +func (t *astBuilder) Build(tok *Token) (bool, error) { + if tok.Type == TokenTypeComment { + comment := &messages.Comment{ + Location: astLocation(tok), + Text: tok.Text, + } + t.comments = append(t.comments, comment) + } else { + t.currentNode().add(tok.Type.RuleType(), tok) + } + return true, nil +} + +func (t *astBuilder) StartRule(r RuleType) (bool, error) { + t.push(newAstNode(r)) + return true, nil +} + +func (t *astBuilder) EndRule(r RuleType) (bool, error) { + node := t.pop() + transformedNode, err := t.transformNode(node) + t.currentNode().add(node.ruleType, transformedNode) + return true, err +} + +func (t *astBuilder) transformNode(node *astNode) (interface{}, error) { + switch node.ruleType { + + case RuleTypeStep: + stepLine := node.getToken(TokenTypeStepLine) + + step := &messages.Step{ + Location: astLocation(stepLine), + Keyword: stepLine.Keyword, + KeywordType: stepLine.KeywordType, + Text: stepLine.Text, + Id: t.newId(), + } + dataTable := node.getSingle(RuleTypeDataTable, nil) + if dataTable != nil { + step.DataTable = dataTable.(*messages.DataTable) + } else { + docString := node.getSingle(RuleTypeDocString, nil) + if docString != nil { + step.DocString = docString.(*messages.DocString) + } + } + + return step, nil + + case RuleTypeDocString: + separatorToken := node.getToken(TokenTypeDocStringSeparator) + lineTokens := node.getTokens(TokenTypeOther) + var text string + for i := range lineTokens { + if i > 0 { + text += "\n" + } + text += lineTokens[i].Text + } + docString := &messages.DocString{ + Location: astLocation(separatorToken), + Content: text, + Delimiter: separatorToken.Keyword, + } + if len(separatorToken.Text) > 0 { + docString.MediaType = separatorToken.Text + } + + return docString, nil + + case RuleTypeDataTable: + rows, err := astTableRows(node, t.newId) + dt := &messages.DataTable{ + Location: rows[0].Location, + Rows: rows, + } + return dt, err + + case RuleTypeBackground: + backgroundLine := node.getToken(TokenTypeBackgroundLine) + bg := &messages.Background{ + Id: t.newId(), + Location: astLocation(backgroundLine), + Keyword: backgroundLine.Keyword, + Name: backgroundLine.Text, + Description: getDescription(node), + Steps: astSteps(node), + } + return bg, nil + + case RuleTypeScenarioDefinition: + scenarioNode := node.getSingle(RuleTypeScenario, nil).(*astNode) + scenarioLine := scenarioNode.getToken(TokenTypeScenarioLine) + tags := astTags(node, t.newId) + sc := &messages.Scenario{ + Id: t.newId(), + Tags: tags, + Location: astLocation(scenarioLine), + Keyword: scenarioLine.Keyword, + Name: scenarioLine.Text, + Description: getDescription(scenarioNode), + Steps: astSteps(scenarioNode), + Examples: astExamples(scenarioNode), + } + + return sc, nil + + case RuleTypeExamplesDefinition: + tags := astTags(node, t.newId) + examplesNode := node.getSingle(RuleTypeExamples, nil).(*astNode) + examplesLine := examplesNode.getToken(TokenTypeExamplesLine) + examplesTable := examplesNode.getSingle(RuleTypeExamplesTable, make([]*messages.TableRow, 0)).([]*messages.TableRow) + + var tableHeader *messages.TableRow + var tableBody []*messages.TableRow + + if len(examplesTable) > 0 { + tableHeader = examplesTable[0] + tableBody = examplesTable[1:] + } else { + tableHeader = nil + tableBody = examplesTable + } + + ex := &messages.Examples{ + Id: t.newId(), + Tags: tags, + Location: 
astLocation(examplesLine), + Keyword: examplesLine.Keyword, + Name: examplesLine.Text, + Description: getDescription(examplesNode), + TableHeader: tableHeader, + TableBody: tableBody, + } + return ex, nil + + case RuleTypeExamplesTable: + allRows, err := astTableRows(node, t.newId) + return allRows, err + + case RuleTypeDescription: + lineTokens := node.getTokens(TokenTypeOther) + // Trim trailing empty lines + end := len(lineTokens) + for end > 0 && strings.TrimSpace(lineTokens[end-1].Text) == "" { + end-- + } + var desc []string + for i := range lineTokens[0:end] { + desc = append(desc, lineTokens[i].Text) + } + return strings.Join(desc, "\n"), nil + + case RuleTypeFeature: + header := node.getSingle(RuleTypeFeatureHeader, nil).(*astNode) + tags := astTags(header, t.newId) + featureLine := header.getToken(TokenTypeFeatureLine) + if featureLine == nil { + return nil, nil + } + + children := make([]*messages.FeatureChild, 0) + background, _ := node.getSingle(RuleTypeBackground, nil).(*messages.Background) + if background != nil { + children = append(children, &messages.FeatureChild{ + Background: background, + }) + } + scenarios := node.getItems(RuleTypeScenarioDefinition) + for i := range scenarios { + scenario := scenarios[i].(*messages.Scenario) + children = append(children, &messages.FeatureChild{ + Scenario: scenario, + }) + } + rules := node.getItems(RuleTypeRule) + for i := range rules { + rule := rules[i].(*messages.Rule) + children = append(children, &messages.FeatureChild{ + Rule: rule, + }) + } + + feature := &messages.Feature{ + Tags: tags, + Location: astLocation(featureLine), + Language: featureLine.GherkinDialect, + Keyword: featureLine.Keyword, + Name: featureLine.Text, + Description: getDescription(header), + Children: children, + } + return feature, nil + + case RuleTypeRule: + header := node.getSingle(RuleTypeRuleHeader, nil).(*astNode) + ruleLine := header.getToken(TokenTypeRuleLine) + if ruleLine == nil { + return nil, nil + } + + tags := astTags(header, t.newId) + var children []*messages.RuleChild + background, _ := node.getSingle(RuleTypeBackground, nil).(*messages.Background) + + if background != nil { + children = append(children, &messages.RuleChild{ + Background: background, + }) + } + scenarios := node.getItems(RuleTypeScenarioDefinition) + for i := range scenarios { + scenario := scenarios[i].(*messages.Scenario) + children = append(children, &messages.RuleChild{ + Scenario: scenario, + }) + } + + rule := &messages.Rule{ + Id: t.newId(), + Location: astLocation(ruleLine), + Keyword: ruleLine.Keyword, + Name: ruleLine.Text, + Description: getDescription(header), + Children: children, + Tags: tags, + } + return rule, nil + + case RuleTypeGherkinDocument: + feature, _ := node.getSingle(RuleTypeFeature, nil).(*messages.Feature) + + doc := &messages.GherkinDocument{} + if feature != nil { + doc.Feature = feature + } + doc.Comments = t.comments + return doc, nil + } + return node, nil +} + +func getDescription(node *astNode) string { + return node.getSingle(RuleTypeDescription, "").(string) +} + +func astLocation(t *Token) *messages.Location { + return &messages.Location{ + Line: int64(t.Location.Line), + Column: int64(t.Location.Column), + } +} + +func astTableRows(t *astNode, newId func() string) (rows []*messages.TableRow, err error) { + rows = []*messages.TableRow{} + tokens := t.getTokens(TokenTypeTableRow) + for i := range tokens { + row := &messages.TableRow{ + Id: newId(), + Location: astLocation(tokens[i]), + Cells: astTableCells(tokens[i]), + } + rows = 
append(rows, row) + } + err = ensureCellCount(rows) + return +} + +func ensureCellCount(rows []*messages.TableRow) error { + if len(rows) <= 1 { + return nil + } + cellCount := len(rows[0].Cells) + for i := range rows { + if cellCount != len(rows[i].Cells) { + return &parseError{"inconsistent cell count within the table", &Location{ + Line: int(rows[i].Location.Line), + Column: int(rows[i].Location.Column), + }} + } + } + return nil +} + +func astTableCells(t *Token) (cells []*messages.TableCell) { + cells = []*messages.TableCell{} + for i := range t.Items { + item := t.Items[i] + cell := &messages.TableCell{} + cell.Location = &messages.Location{ + Line: int64(t.Location.Line), + Column: int64(item.Column), + } + cell.Value = item.Text + cells = append(cells, cell) + } + return +} + +func astSteps(t *astNode) (steps []*messages.Step) { + steps = []*messages.Step{} + tokens := t.getItems(RuleTypeStep) + for i := range tokens { + step, _ := tokens[i].(*messages.Step) + steps = append(steps, step) + } + return +} + +func astExamples(t *astNode) (examples []*messages.Examples) { + examples = []*messages.Examples{} + tokens := t.getItems(RuleTypeExamplesDefinition) + for i := range tokens { + example, _ := tokens[i].(*messages.Examples) + examples = append(examples, example) + } + return +} + +func astTags(node *astNode, newId func() string) (tags []*messages.Tag) { + tags = []*messages.Tag{} + tagsNode, ok := node.getSingle(RuleTypeTags, nil).(*astNode) + if !ok { + return + } + tokens := tagsNode.getTokens(TokenTypeTagLine) + for i := range tokens { + token := tokens[i] + for k := range token.Items { + item := token.Items[k] + tag := &messages.Tag{} + tag.Location = &messages.Location{ + Line: int64(token.Location.Line), + Column: int64(item.Column), + } + tag.Name = item.Text + tag.Id = newId() + tags = append(tags, tag) + } + } + return +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/dialect.go b/vendor/github.com/cucumber/gherkin/go/v26/dialect.go new file mode 100644 index 000000000..212df62b2 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/dialect.go @@ -0,0 +1,58 @@ +package gherkin + +import messages "github.com/cucumber/messages/go/v21" + +type Dialect struct { + Language string + Name string + Native string + Keywords map[string][]string + KeywordTypes map[string]messages.StepKeywordType +} + +func (g *Dialect) FeatureKeywords() []string { + return g.Keywords["feature"] +} + +func (g *Dialect) RuleKeywords() []string { + return g.Keywords["rule"] +} + +func (g *Dialect) ScenarioKeywords() []string { + return g.Keywords["scenario"] +} + +func (g *Dialect) StepKeywords() []string { + result := g.Keywords["given"] + result = append(result, g.Keywords["when"]...) + result = append(result, g.Keywords["then"]...) + result = append(result, g.Keywords["and"]...) + result = append(result, g.Keywords["but"]...) 
+ return result +} + +func (g *Dialect) BackgroundKeywords() []string { + return g.Keywords["background"] +} + +func (g *Dialect) ScenarioOutlineKeywords() []string { + return g.Keywords["scenarioOutline"] +} + +func (g *Dialect) ExamplesKeywords() []string { + return g.Keywords["examples"] +} + +func (g *Dialect) StepKeywordType(keyword string) messages.StepKeywordType { + return g.KeywordTypes[keyword] +} + +type DialectProvider interface { + GetDialect(language string) *Dialect +} + +type gherkinDialectMap map[string]*Dialect + +func (g gherkinDialectMap) GetDialect(language string) *Dialect { + return g[language] +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go new file mode 100644 index 000000000..2362612d8 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go @@ -0,0 +1,5229 @@ +package gherkin + +import messages "github.com/cucumber/messages/go/v21" + +// Builtin dialects for af (Afrikaans), am (Armenian), an (Aragonese), ar (Arabic), ast (Asturian), az (Azerbaijani), be (Belarusian), bg (Bulgarian), bm (Malay), bs (Bosnian), ca (Catalan), cs (Czech), cy-GB (Welsh), da (Danish), de (German), el (Greek), em (Emoji), en (English), en-Scouse (Scouse), en-au (Australian), en-lol (LOLCAT), en-old (Old English), en-pirate (Pirate), en-tx (Texas), eo (Esperanto), es (Spanish), et (Estonian), fa (Persian), fi (Finnish), fr (French), ga (Irish), gj (Gujarati), gl (Galician), he (Hebrew), hi (Hindi), hr (Croatian), ht (Creole), hu (Hungarian), id (Indonesian), is (Icelandic), it (Italian), ja (Japanese), jv (Javanese), ka (Georgian), kn (Kannada), ko (Korean), lt (Lithuanian), lu (Luxemburgish), lv (Latvian), mk-Cyrl (Macedonian), mk-Latn (Macedonian (Latin)), mn (Mongolian), ne (Nepali), nl (Dutch), no (Norwegian), pa (Panjabi), pl (Polish), pt (Portuguese), ro (Romanian), ru (Russian), sk (Slovak), sl (Slovenian), sr-Cyrl (Serbian), sr-Latn (Serbian (Latin)), sv (Swedish), ta (Tamil), th (Thai), te (Telugu), tlh (Klingon), tr (Turkish), tt (Tatar), uk (Ukrainian), ur (Urdu), uz (Uzbek), vi (Vietnamese), zh-CN (Chinese simplified), zh-TW (Chinese traditional), mr (Marathi), amh (Amharic) +func DialectsBuiltin() DialectProvider { + return builtinDialects +} + +const ( + feature = "feature" + rule = "rule" + background = "background" + scenario = "scenario" + scenarioOutline = "scenarioOutline" + examples = "examples" + given = "given" + when = "when" + then = "then" + and = "and" + but = "but" +) + +var builtinDialects = gherkinDialectMap{ + "af": &Dialect{ + "af", "Afrikaans", "Afrikaans", map[string][]string{ + feature: { + "Funksie", + "Besigheid Behoefte", + "Vermoë", + }, + rule: { + "Regel", + }, + background: { + "Agtergrond", + }, + scenario: { + "Voorbeeld", + "Situasie", + }, + scenarioOutline: { + "Situasie Uiteensetting", + }, + examples: { + "Voorbeelde", + }, + given: { + "* ", + "Gegewe ", + }, + when: { + "* ", + "Wanneer ", + }, + then: { + "* ", + "Dan ", + }, + and: { + "* ", + "En ", + }, + but: { + "* ", + "Maar ", + }, + }, + map[string]messages.StepKeywordType{ + "Gegewe ": messages.StepKeywordType_CONTEXT, + + "Wanneer ": messages.StepKeywordType_ACTION, + + "Dan ": messages.StepKeywordType_OUTCOME, + + "En ": messages.StepKeywordType_CONJUNCTION, + + "Maar ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "am": &Dialect{ + "am", "Armenian", "հայերեն", map[string][]string{ + feature: { + 
"Ֆունկցիոնալություն", + "Հատկություն", + }, + rule: { + "Rule", + }, + background: { + "Կոնտեքստ", + }, + scenario: { + "Օրինակ", + "Սցենար", + }, + scenarioOutline: { + "Սցենարի կառուցվացքը", + }, + examples: { + "Օրինակներ", + }, + given: { + "* ", + "Դիցուք ", + }, + when: { + "* ", + "Եթե ", + "Երբ ", + }, + then: { + "* ", + "Ապա ", + }, + and: { + "* ", + "Եվ ", + }, + but: { + "* ", + "Բայց ", + }, + }, + map[string]messages.StepKeywordType{ + "Դիցուք ": messages.StepKeywordType_CONTEXT, + + "Եթե ": messages.StepKeywordType_ACTION, + + "Երբ ": messages.StepKeywordType_ACTION, + + "Ապա ": messages.StepKeywordType_OUTCOME, + + "Եվ ": messages.StepKeywordType_CONJUNCTION, + + "Բայց ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "an": &Dialect{ + "an", "Aragonese", "Aragonés", map[string][]string{ + feature: { + "Caracteristica", + }, + rule: { + "Rule", + }, + background: { + "Antecedents", + }, + scenario: { + "Eixemplo", + "Caso", + }, + scenarioOutline: { + "Esquema del caso", + }, + examples: { + "Eixemplos", + }, + given: { + "* ", + "Dau ", + "Dada ", + "Daus ", + "Dadas ", + }, + when: { + "* ", + "Cuan ", + }, + then: { + "* ", + "Alavez ", + "Allora ", + "Antonces ", + }, + and: { + "* ", + "Y ", + "E ", + }, + but: { + "* ", + "Pero ", + }, + }, + map[string]messages.StepKeywordType{ + "Dau ": messages.StepKeywordType_CONTEXT, + + "Dada ": messages.StepKeywordType_CONTEXT, + + "Daus ": messages.StepKeywordType_CONTEXT, + + "Dadas ": messages.StepKeywordType_CONTEXT, + + "Cuan ": messages.StepKeywordType_ACTION, + + "Alavez ": messages.StepKeywordType_OUTCOME, + + "Allora ": messages.StepKeywordType_OUTCOME, + + "Antonces ": messages.StepKeywordType_OUTCOME, + + "Y ": messages.StepKeywordType_CONJUNCTION, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Pero ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ar": &Dialect{ + "ar", "Arabic", "العربية", map[string][]string{ + feature: { + "خاصية", + }, + rule: { + "Rule", + }, + background: { + "الخلفية", + }, + scenario: { + "مثال", + "سيناريو", + }, + scenarioOutline: { + "سيناريو مخطط", + }, + examples: { + "امثلة", + }, + given: { + "* ", + "بفرض ", + }, + when: { + "* ", + "متى ", + "عندما ", + }, + then: { + "* ", + "اذاً ", + "ثم ", + }, + and: { + "* ", + "و ", + }, + but: { + "* ", + "لكن ", + }, + }, + map[string]messages.StepKeywordType{ + "بفرض ": messages.StepKeywordType_CONTEXT, + + "متى ": messages.StepKeywordType_ACTION, + + "عندما ": messages.StepKeywordType_ACTION, + + "اذاً ": messages.StepKeywordType_OUTCOME, + + "ثم ": messages.StepKeywordType_OUTCOME, + + "و ": messages.StepKeywordType_CONJUNCTION, + + "لكن ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ast": &Dialect{ + "ast", "Asturian", "asturianu", map[string][]string{ + feature: { + "Carauterística", + }, + rule: { + "Rule", + }, + background: { + "Antecedentes", + }, + scenario: { + "Exemplo", + "Casu", + }, + scenarioOutline: { + "Esbozu del casu", + }, + examples: { + "Exemplos", + }, + given: { + "* ", + "Dáu ", + "Dada ", + "Daos ", + "Daes ", + }, + when: { + "* ", + "Cuando ", + }, + then: { + "* ", + "Entós ", + }, + and: { + "* ", + "Y ", + "Ya ", + }, + but: { + "* ", + "Peru ", + }, + }, + map[string]messages.StepKeywordType{ + "Dáu ": messages.StepKeywordType_CONTEXT, + + "Dada ": messages.StepKeywordType_CONTEXT, + + "Daos ": messages.StepKeywordType_CONTEXT, + + "Daes ": 
messages.StepKeywordType_CONTEXT, + + "Cuando ": messages.StepKeywordType_ACTION, + + "Entós ": messages.StepKeywordType_OUTCOME, + + "Y ": messages.StepKeywordType_CONJUNCTION, + + "Ya ": messages.StepKeywordType_CONJUNCTION, + + "Peru ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "az": &Dialect{ + "az", "Azerbaijani", "Azərbaycanca", map[string][]string{ + feature: { + "Özəllik", + }, + rule: { + "Rule", + }, + background: { + "Keçmiş", + "Kontekst", + }, + scenario: { + "Nümunə", + "Ssenari", + }, + scenarioOutline: { + "Ssenarinin strukturu", + }, + examples: { + "Nümunələr", + }, + given: { + "* ", + "Tutaq ki ", + "Verilir ", + }, + when: { + "* ", + "Əgər ", + "Nə vaxt ki ", + }, + then: { + "* ", + "O halda ", + }, + and: { + "* ", + "Və ", + "Həm ", + }, + but: { + "* ", + "Amma ", + "Ancaq ", + }, + }, + map[string]messages.StepKeywordType{ + "Tutaq ki ": messages.StepKeywordType_CONTEXT, + + "Verilir ": messages.StepKeywordType_CONTEXT, + + "Əgər ": messages.StepKeywordType_ACTION, + + "Nə vaxt ki ": messages.StepKeywordType_ACTION, + + "O halda ": messages.StepKeywordType_OUTCOME, + + "Və ": messages.StepKeywordType_CONJUNCTION, + + "Həm ": messages.StepKeywordType_CONJUNCTION, + + "Amma ": messages.StepKeywordType_CONJUNCTION, + + "Ancaq ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "be": &Dialect{ + "be", "Belarusian", "Беларуская", map[string][]string{ + feature: { + "Функцыянальнасць", + "Фіча", + }, + rule: { + "Правілы", + }, + background: { + "Кантэкст", + }, + scenario: { + "Сцэнарый", + "Cцэнар", + }, + scenarioOutline: { + "Шаблон сцэнарыя", + "Узор сцэнара", + }, + examples: { + "Прыклады", + }, + given: { + "* ", + "Няхай ", + "Дадзена ", + }, + when: { + "* ", + "Калі ", + }, + then: { + "* ", + "Тады ", + }, + and: { + "* ", + "I ", + "Ды ", + "Таксама ", + }, + but: { + "* ", + "Але ", + "Інакш ", + }, + }, + map[string]messages.StepKeywordType{ + "Няхай ": messages.StepKeywordType_CONTEXT, + + "Дадзена ": messages.StepKeywordType_CONTEXT, + + "Калі ": messages.StepKeywordType_ACTION, + + "Тады ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "Ды ": messages.StepKeywordType_CONJUNCTION, + + "Таксама ": messages.StepKeywordType_CONJUNCTION, + + "Але ": messages.StepKeywordType_CONJUNCTION, + + "Інакш ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "bg": &Dialect{ + "bg", "Bulgarian", "български", map[string][]string{ + feature: { + "Функционалност", + }, + rule: { + "Правило", + }, + background: { + "Предистория", + }, + scenario: { + "Пример", + "Сценарий", + }, + scenarioOutline: { + "Рамка на сценарий", + }, + examples: { + "Примери", + }, + given: { + "* ", + "Дадено ", + }, + when: { + "* ", + "Когато ", + }, + then: { + "* ", + "То ", + }, + and: { + "* ", + "И ", + }, + but: { + "* ", + "Но ", + }, + }, + map[string]messages.StepKeywordType{ + "Дадено ": messages.StepKeywordType_CONTEXT, + + "Когато ": messages.StepKeywordType_ACTION, + + "То ": messages.StepKeywordType_OUTCOME, + + "И ": messages.StepKeywordType_CONJUNCTION, + + "Но ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "bm": &Dialect{ + "bm", "Malay", "Bahasa Melayu", map[string][]string{ + feature: { + "Fungsi", + }, + rule: { + "Rule", + }, + background: { + "Latar Belakang", + }, + scenario: { + "Senario", + "Situasi", + "Keadaan", + }, + scenarioOutline: { + "Kerangka 
Senario", + "Kerangka Situasi", + "Kerangka Keadaan", + "Garis Panduan Senario", + }, + examples: { + "Contoh", + }, + given: { + "* ", + "Diberi ", + "Bagi ", + }, + when: { + "* ", + "Apabila ", + }, + then: { + "* ", + "Maka ", + "Kemudian ", + }, + and: { + "* ", + "Dan ", + }, + but: { + "* ", + "Tetapi ", + "Tapi ", + }, + }, + map[string]messages.StepKeywordType{ + "Diberi ": messages.StepKeywordType_CONTEXT, + + "Bagi ": messages.StepKeywordType_CONTEXT, + + "Apabila ": messages.StepKeywordType_ACTION, + + "Maka ": messages.StepKeywordType_OUTCOME, + + "Kemudian ": messages.StepKeywordType_OUTCOME, + + "Dan ": messages.StepKeywordType_CONJUNCTION, + + "Tetapi ": messages.StepKeywordType_CONJUNCTION, + + "Tapi ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "bs": &Dialect{ + "bs", "Bosnian", "Bosanski", map[string][]string{ + feature: { + "Karakteristika", + }, + rule: { + "Rule", + }, + background: { + "Pozadina", + }, + scenario: { + "Primjer", + "Scenariju", + "Scenario", + }, + scenarioOutline: { + "Scenariju-obris", + "Scenario-outline", + }, + examples: { + "Primjeri", + }, + given: { + "* ", + "Dato ", + }, + when: { + "* ", + "Kada ", + }, + then: { + "* ", + "Zatim ", + }, + and: { + "* ", + "I ", + "A ", + }, + but: { + "* ", + "Ali ", + }, + }, + map[string]messages.StepKeywordType{ + "Dato ": messages.StepKeywordType_CONTEXT, + + "Kada ": messages.StepKeywordType_ACTION, + + "Zatim ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "A ": messages.StepKeywordType_CONJUNCTION, + + "Ali ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ca": &Dialect{ + "ca", "Catalan", "català", map[string][]string{ + feature: { + "Característica", + "Funcionalitat", + }, + rule: { + "Rule", + }, + background: { + "Rerefons", + "Antecedents", + }, + scenario: { + "Exemple", + "Escenari", + }, + scenarioOutline: { + "Esquema de l'escenari", + }, + examples: { + "Exemples", + }, + given: { + "* ", + "Donat ", + "Donada ", + "Atès ", + "Atesa ", + }, + when: { + "* ", + "Quan ", + }, + then: { + "* ", + "Aleshores ", + "Cal ", + }, + and: { + "* ", + "I ", + }, + but: { + "* ", + "Però ", + }, + }, + map[string]messages.StepKeywordType{ + "Donat ": messages.StepKeywordType_CONTEXT, + + "Donada ": messages.StepKeywordType_CONTEXT, + + "Atès ": messages.StepKeywordType_CONTEXT, + + "Atesa ": messages.StepKeywordType_CONTEXT, + + "Quan ": messages.StepKeywordType_ACTION, + + "Aleshores ": messages.StepKeywordType_OUTCOME, + + "Cal ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "Però ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "cs": &Dialect{ + "cs", "Czech", "Česky", map[string][]string{ + feature: { + "Požadavek", + }, + rule: { + "Pravidlo", + }, + background: { + "Pozadí", + "Kontext", + }, + scenario: { + "Příklad", + "Scénář", + }, + scenarioOutline: { + "Náčrt Scénáře", + "Osnova scénáře", + }, + examples: { + "Příklady", + }, + given: { + "* ", + "Pokud ", + "Za předpokladu ", + }, + when: { + "* ", + "Když ", + }, + then: { + "* ", + "Pak ", + }, + and: { + "* ", + "A také ", + "A ", + }, + but: { + "* ", + "Ale ", + }, + }, + map[string]messages.StepKeywordType{ + "Pokud ": messages.StepKeywordType_CONTEXT, + + "Za předpokladu ": messages.StepKeywordType_CONTEXT, + + "Když ": messages.StepKeywordType_ACTION, + + "Pak ": messages.StepKeywordType_OUTCOME, + + "A také ": 
messages.StepKeywordType_CONJUNCTION, + + "A ": messages.StepKeywordType_CONJUNCTION, + + "Ale ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "cy-GB": &Dialect{ + "cy-GB", "Welsh", "Cymraeg", map[string][]string{ + feature: { + "Arwedd", + }, + rule: { + "Rule", + }, + background: { + "Cefndir", + }, + scenario: { + "Enghraifft", + "Scenario", + }, + scenarioOutline: { + "Scenario Amlinellol", + }, + examples: { + "Enghreifftiau", + }, + given: { + "* ", + "Anrhegedig a ", + }, + when: { + "* ", + "Pryd ", + }, + then: { + "* ", + "Yna ", + }, + and: { + "* ", + "A ", + }, + but: { + "* ", + "Ond ", + }, + }, + map[string]messages.StepKeywordType{ + "Anrhegedig a ": messages.StepKeywordType_CONTEXT, + + "Pryd ": messages.StepKeywordType_ACTION, + + "Yna ": messages.StepKeywordType_OUTCOME, + + "A ": messages.StepKeywordType_CONJUNCTION, + + "Ond ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "da": &Dialect{ + "da", "Danish", "dansk", map[string][]string{ + feature: { + "Egenskab", + }, + rule: { + "Rule", + }, + background: { + "Baggrund", + }, + scenario: { + "Eksempel", + "Scenarie", + }, + scenarioOutline: { + "Abstrakt Scenario", + }, + examples: { + "Eksempler", + }, + given: { + "* ", + "Givet ", + }, + when: { + "* ", + "Når ", + }, + then: { + "* ", + "Så ", + }, + and: { + "* ", + "Og ", + }, + but: { + "* ", + "Men ", + }, + }, + map[string]messages.StepKeywordType{ + "Givet ": messages.StepKeywordType_CONTEXT, + + "Når ": messages.StepKeywordType_ACTION, + + "Så ": messages.StepKeywordType_OUTCOME, + + "Og ": messages.StepKeywordType_CONJUNCTION, + + "Men ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "de": &Dialect{ + "de", "German", "Deutsch", map[string][]string{ + feature: { + "Funktionalität", + "Funktion", + }, + rule: { + "Rule", + "Regel", + }, + background: { + "Grundlage", + "Hintergrund", + "Voraussetzungen", + "Vorbedingungen", + }, + scenario: { + "Beispiel", + "Szenario", + }, + scenarioOutline: { + "Szenariogrundriss", + "Szenarien", + }, + examples: { + "Beispiele", + }, + given: { + "* ", + "Angenommen ", + "Gegeben sei ", + "Gegeben seien ", + }, + when: { + "* ", + "Wenn ", + }, + then: { + "* ", + "Dann ", + }, + and: { + "* ", + "Und ", + }, + but: { + "* ", + "Aber ", + }, + }, + map[string]messages.StepKeywordType{ + "Angenommen ": messages.StepKeywordType_CONTEXT, + + "Gegeben sei ": messages.StepKeywordType_CONTEXT, + + "Gegeben seien ": messages.StepKeywordType_CONTEXT, + + "Wenn ": messages.StepKeywordType_ACTION, + + "Dann ": messages.StepKeywordType_OUTCOME, + + "Und ": messages.StepKeywordType_CONJUNCTION, + + "Aber ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "el": &Dialect{ + "el", "Greek", "Ελληνικά", map[string][]string{ + feature: { + "Δυνατότητα", + "Λειτουργία", + }, + rule: { + "Rule", + }, + background: { + "Υπόβαθρο", + }, + scenario: { + "Παράδειγμα", + "Σενάριο", + }, + scenarioOutline: { + "Περιγραφή Σεναρίου", + "Περίγραμμα Σεναρίου", + }, + examples: { + "Παραδείγματα", + "Σενάρια", + }, + given: { + "* ", + "Δεδομένου ", + }, + when: { + "* ", + "Όταν ", + }, + then: { + "* ", + "Τότε ", + }, + and: { + "* ", + "Και ", + }, + but: { + "* ", + "Αλλά ", + }, + }, + map[string]messages.StepKeywordType{ + "Δεδομένου ": messages.StepKeywordType_CONTEXT, + + "Όταν ": messages.StepKeywordType_ACTION, + + "Τότε ": messages.StepKeywordType_OUTCOME, + + "Και ": 
messages.StepKeywordType_CONJUNCTION, + + "Αλλά ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "em": &Dialect{ + "em", "Emoji", "😀", map[string][]string{ + feature: { + "📚", + }, + rule: { + "Rule", + }, + background: { + "💤", + }, + scenario: { + "🥒", + "📕", + }, + scenarioOutline: { + "📖", + }, + examples: { + "📓", + }, + given: { + "* ", + "😐", + }, + when: { + "* ", + "🎬", + }, + then: { + "* ", + "🙏", + }, + and: { + "* ", + "😂", + }, + but: { + "* ", + "😔", + }, + }, + map[string]messages.StepKeywordType{ + "😐": messages.StepKeywordType_CONTEXT, + + "🎬": messages.StepKeywordType_ACTION, + + "🙏": messages.StepKeywordType_OUTCOME, + + "😂": messages.StepKeywordType_CONJUNCTION, + + "😔": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en": &Dialect{ + "en", "English", "English", map[string][]string{ + feature: { + "Feature", + "Business Need", + "Ability", + }, + rule: { + "Rule", + }, + background: { + "Background", + }, + scenario: { + "Example", + "Scenario", + }, + scenarioOutline: { + "Scenario Outline", + "Scenario Template", + }, + examples: { + "Examples", + "Scenarios", + }, + given: { + "* ", + "Given ", + }, + when: { + "* ", + "When ", + }, + then: { + "* ", + "Then ", + }, + and: { + "* ", + "And ", + }, + but: { + "* ", + "But ", + }, + }, + map[string]messages.StepKeywordType{ + "Given ": messages.StepKeywordType_CONTEXT, + + "When ": messages.StepKeywordType_ACTION, + + "Then ": messages.StepKeywordType_OUTCOME, + + "And ": messages.StepKeywordType_CONJUNCTION, + + "But ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-Scouse": &Dialect{ + "en-Scouse", "Scouse", "Scouse", map[string][]string{ + feature: { + "Feature", + }, + rule: { + "Rule", + }, + background: { + "Dis is what went down", + }, + scenario: { + "The thing of it is", + }, + scenarioOutline: { + "Wharrimean is", + }, + examples: { + "Examples", + }, + given: { + "* ", + "Givun ", + "Youse know when youse got ", + }, + when: { + "* ", + "Wun ", + "Youse know like when ", + }, + then: { + "* ", + "Dun ", + "Den youse gotta ", + }, + and: { + "* ", + "An ", + }, + but: { + "* ", + "Buh ", + }, + }, + map[string]messages.StepKeywordType{ + "Givun ": messages.StepKeywordType_CONTEXT, + + "Youse know when youse got ": messages.StepKeywordType_CONTEXT, + + "Wun ": messages.StepKeywordType_ACTION, + + "Youse know like when ": messages.StepKeywordType_ACTION, + + "Dun ": messages.StepKeywordType_OUTCOME, + + "Den youse gotta ": messages.StepKeywordType_OUTCOME, + + "An ": messages.StepKeywordType_CONJUNCTION, + + "Buh ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-au": &Dialect{ + "en-au", "Australian", "Australian", map[string][]string{ + feature: { + "Pretty much", + }, + rule: { + "Rule", + }, + background: { + "First off", + }, + scenario: { + "Awww, look mate", + }, + scenarioOutline: { + "Reckon it's like", + }, + examples: { + "You'll wanna", + }, + given: { + "* ", + "Y'know ", + }, + when: { + "* ", + "It's just unbelievable ", + }, + then: { + "* ", + "But at the end of the day I reckon ", + }, + and: { + "* ", + "Too right ", + }, + but: { + "* ", + "Yeah nah ", + }, + }, + map[string]messages.StepKeywordType{ + "Y'know ": messages.StepKeywordType_CONTEXT, + + "It's just unbelievable ": messages.StepKeywordType_ACTION, + + "But at the end of the day I reckon ": messages.StepKeywordType_OUTCOME, + + "Too right ": 
messages.StepKeywordType_CONJUNCTION, + + "Yeah nah ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-lol": &Dialect{ + "en-lol", "LOLCAT", "LOLCAT", map[string][]string{ + feature: { + "OH HAI", + }, + rule: { + "Rule", + }, + background: { + "B4", + }, + scenario: { + "MISHUN", + }, + scenarioOutline: { + "MISHUN SRSLY", + }, + examples: { + "EXAMPLZ", + }, + given: { + "* ", + "I CAN HAZ ", + }, + when: { + "* ", + "WEN ", + }, + then: { + "* ", + "DEN ", + }, + and: { + "* ", + "AN ", + }, + but: { + "* ", + "BUT ", + }, + }, + map[string]messages.StepKeywordType{ + "I CAN HAZ ": messages.StepKeywordType_CONTEXT, + + "WEN ": messages.StepKeywordType_ACTION, + + "DEN ": messages.StepKeywordType_OUTCOME, + + "AN ": messages.StepKeywordType_CONJUNCTION, + + "BUT ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-old": &Dialect{ + "en-old", "Old English", "Englisc", map[string][]string{ + feature: { + "Hwaet", + "Hwæt", + }, + rule: { + "Rule", + }, + background: { + "Aer", + "Ær", + }, + scenario: { + "Swa", + }, + scenarioOutline: { + "Swa hwaer swa", + "Swa hwær swa", + }, + examples: { + "Se the", + "Se þe", + "Se ðe", + }, + given: { + "* ", + "Thurh ", + "Þurh ", + "Ðurh ", + }, + when: { + "* ", + "Bæþsealf ", + "Bæþsealfa ", + "Bæþsealfe ", + "Ciricæw ", + "Ciricæwe ", + "Ciricæwa ", + }, + then: { + "* ", + "Tha ", + "Þa ", + "Ða ", + "Tha the ", + "Þa þe ", + "Ða ðe ", + }, + and: { + "* ", + "Ond ", + "7 ", + }, + but: { + "* ", + "Ac ", + }, + }, + map[string]messages.StepKeywordType{ + "Thurh ": messages.StepKeywordType_CONTEXT, + + "Þurh ": messages.StepKeywordType_CONTEXT, + + "Ðurh ": messages.StepKeywordType_CONTEXT, + + "Bæþsealf ": messages.StepKeywordType_ACTION, + + "Bæþsealfa ": messages.StepKeywordType_ACTION, + + "Bæþsealfe ": messages.StepKeywordType_ACTION, + + "Ciricæw ": messages.StepKeywordType_ACTION, + + "Ciricæwe ": messages.StepKeywordType_ACTION, + + "Ciricæwa ": messages.StepKeywordType_ACTION, + + "Tha ": messages.StepKeywordType_OUTCOME, + + "Þa ": messages.StepKeywordType_OUTCOME, + + "Ða ": messages.StepKeywordType_OUTCOME, + + "Tha the ": messages.StepKeywordType_OUTCOME, + + "Þa þe ": messages.StepKeywordType_OUTCOME, + + "Ða ðe ": messages.StepKeywordType_OUTCOME, + + "Ond ": messages.StepKeywordType_CONJUNCTION, + + "7 ": messages.StepKeywordType_CONJUNCTION, + + "Ac ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-pirate": &Dialect{ + "en-pirate", "Pirate", "Pirate", map[string][]string{ + feature: { + "Ahoy matey!", + }, + rule: { + "Rule", + }, + background: { + "Yo-ho-ho", + }, + scenario: { + "Heave to", + }, + scenarioOutline: { + "Shiver me timbers", + }, + examples: { + "Dead men tell no tales", + }, + given: { + "* ", + "Gangway! ", + }, + when: { + "* ", + "Blimey! ", + }, + then: { + "* ", + "Let go and haul ", + }, + and: { + "* ", + "Aye ", + }, + but: { + "* ", + "Avast! ", + }, + }, + map[string]messages.StepKeywordType{ + "Gangway! ": messages.StepKeywordType_CONTEXT, + + "Blimey! ": messages.StepKeywordType_ACTION, + + "Let go and haul ": messages.StepKeywordType_OUTCOME, + + "Aye ": messages.StepKeywordType_CONJUNCTION, + + "Avast! 
": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "en-tx": &Dialect{ + "en-tx", "Texas", "Texas", map[string][]string{ + feature: { + "This ain’t my first rodeo", + "All gussied up", + }, + rule: { + "Rule ", + }, + background: { + "Lemme tell y'all a story", + }, + scenario: { + "All hat and no cattle", + }, + scenarioOutline: { + "Serious as a snake bite", + "Busy as a hound in flea season", + }, + examples: { + "Now that's a story longer than a cattle drive in July", + }, + given: { + "Fixin' to ", + "All git out ", + }, + when: { + "Quick out of the chute ", + }, + then: { + "There’s no tree but bears some fruit ", + }, + and: { + "Come hell or high water ", + }, + but: { + "Well now hold on, I'll you what ", + }, + }, + map[string]messages.StepKeywordType{ + "Fixin' to ": messages.StepKeywordType_CONTEXT, + + "All git out ": messages.StepKeywordType_CONTEXT, + + "Quick out of the chute ": messages.StepKeywordType_ACTION, + + "There’s no tree but bears some fruit ": messages.StepKeywordType_OUTCOME, + + "Come hell or high water ": messages.StepKeywordType_CONJUNCTION, + + "Well now hold on, I'll you what ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "eo": &Dialect{ + "eo", "Esperanto", "Esperanto", map[string][]string{ + feature: { + "Trajto", + }, + rule: { + "Rule", + }, + background: { + "Fono", + }, + scenario: { + "Ekzemplo", + "Scenaro", + "Kazo", + }, + scenarioOutline: { + "Konturo de la scenaro", + "Skizo", + "Kazo-skizo", + }, + examples: { + "Ekzemploj", + }, + given: { + "* ", + "Donitaĵo ", + "Komence ", + }, + when: { + "* ", + "Se ", + }, + then: { + "* ", + "Do ", + }, + and: { + "* ", + "Kaj ", + }, + but: { + "* ", + "Sed ", + }, + }, + map[string]messages.StepKeywordType{ + "Donitaĵo ": messages.StepKeywordType_CONTEXT, + + "Komence ": messages.StepKeywordType_CONTEXT, + + "Se ": messages.StepKeywordType_ACTION, + + "Do ": messages.StepKeywordType_OUTCOME, + + "Kaj ": messages.StepKeywordType_CONJUNCTION, + + "Sed ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "es": &Dialect{ + "es", "Spanish", "español", map[string][]string{ + feature: { + "Característica", + "Necesidad del negocio", + "Requisito", + }, + rule: { + "Regla", + "Regla de negocio", + }, + background: { + "Antecedentes", + }, + scenario: { + "Ejemplo", + "Escenario", + }, + scenarioOutline: { + "Esquema del escenario", + }, + examples: { + "Ejemplos", + }, + given: { + "* ", + "Dado ", + "Dada ", + "Dados ", + "Dadas ", + }, + when: { + "* ", + "Cuando ", + }, + then: { + "* ", + "Entonces ", + }, + and: { + "* ", + "Y ", + "E ", + }, + but: { + "* ", + "Pero ", + }, + }, + map[string]messages.StepKeywordType{ + "Dado ": messages.StepKeywordType_CONTEXT, + + "Dada ": messages.StepKeywordType_CONTEXT, + + "Dados ": messages.StepKeywordType_CONTEXT, + + "Dadas ": messages.StepKeywordType_CONTEXT, + + "Cuando ": messages.StepKeywordType_ACTION, + + "Entonces ": messages.StepKeywordType_OUTCOME, + + "Y ": messages.StepKeywordType_CONJUNCTION, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Pero ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "et": &Dialect{ + "et", "Estonian", "eesti keel", map[string][]string{ + feature: { + "Omadus", + }, + rule: { + "Reegel", + }, + background: { + "Taust", + }, + scenario: { + "Juhtum", + "Stsenaarium", + }, + scenarioOutline: { + "Raamjuhtum", + "Raamstsenaarium", + }, + examples: { + 
"Juhtumid", + }, + given: { + "* ", + "Eeldades ", + }, + when: { + "* ", + "Kui ", + }, + then: { + "* ", + "Siis ", + }, + and: { + "* ", + "Ja ", + }, + but: { + "* ", + "Kuid ", + }, + }, + map[string]messages.StepKeywordType{ + "Eeldades ": messages.StepKeywordType_CONTEXT, + + "Kui ": messages.StepKeywordType_ACTION, + + "Siis ": messages.StepKeywordType_OUTCOME, + + "Ja ": messages.StepKeywordType_CONJUNCTION, + + "Kuid ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "fa": &Dialect{ + "fa", "Persian", "فارسی", map[string][]string{ + feature: { + "وِیژگی", + }, + rule: { + "Rule", + }, + background: { + "زمینه", + }, + scenario: { + "مثال", + "سناریو", + }, + scenarioOutline: { + "الگوی سناریو", + }, + examples: { + "نمونه ها", + }, + given: { + "* ", + "با فرض ", + }, + when: { + "* ", + "هنگامی ", + }, + then: { + "* ", + "آنگاه ", + }, + and: { + "* ", + "و ", + }, + but: { + "* ", + "اما ", + }, + }, + map[string]messages.StepKeywordType{ + "با فرض ": messages.StepKeywordType_CONTEXT, + + "هنگامی ": messages.StepKeywordType_ACTION, + + "آنگاه ": messages.StepKeywordType_OUTCOME, + + "و ": messages.StepKeywordType_CONJUNCTION, + + "اما ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "fi": &Dialect{ + "fi", "Finnish", "suomi", map[string][]string{ + feature: { + "Ominaisuus", + }, + rule: { + "Rule", + }, + background: { + "Tausta", + }, + scenario: { + "Tapaus", + }, + scenarioOutline: { + "Tapausaihio", + }, + examples: { + "Tapaukset", + }, + given: { + "* ", + "Oletetaan ", + }, + when: { + "* ", + "Kun ", + }, + then: { + "* ", + "Niin ", + }, + and: { + "* ", + "Ja ", + }, + but: { + "* ", + "Mutta ", + }, + }, + map[string]messages.StepKeywordType{ + "Oletetaan ": messages.StepKeywordType_CONTEXT, + + "Kun ": messages.StepKeywordType_ACTION, + + "Niin ": messages.StepKeywordType_OUTCOME, + + "Ja ": messages.StepKeywordType_CONJUNCTION, + + "Mutta ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "fr": &Dialect{ + "fr", "French", "français", map[string][]string{ + feature: { + "Fonctionnalité", + }, + rule: { + "Règle", + }, + background: { + "Contexte", + }, + scenario: { + "Exemple", + "Scénario", + }, + scenarioOutline: { + "Plan du scénario", + "Plan du Scénario", + }, + examples: { + "Exemples", + }, + given: { + "* ", + "Soit ", + "Sachant que ", + "Sachant qu'", + "Sachant ", + "Etant donné que ", + "Etant donné qu'", + "Etant donné ", + "Etant donnée ", + "Etant donnés ", + "Etant données ", + "Étant donné que ", + "Étant donné qu'", + "Étant donné ", + "Étant donnée ", + "Étant donnés ", + "Étant données ", + }, + when: { + "* ", + "Quand ", + "Lorsque ", + "Lorsqu'", + }, + then: { + "* ", + "Alors ", + "Donc ", + }, + and: { + "* ", + "Et que ", + "Et qu'", + "Et ", + }, + but: { + "* ", + "Mais que ", + "Mais qu'", + "Mais ", + }, + }, + map[string]messages.StepKeywordType{ + "Soit ": messages.StepKeywordType_CONTEXT, + + "Sachant que ": messages.StepKeywordType_CONTEXT, + + "Sachant qu'": messages.StepKeywordType_CONTEXT, + + "Sachant ": messages.StepKeywordType_CONTEXT, + + "Etant donné que ": messages.StepKeywordType_CONTEXT, + + "Etant donné qu'": messages.StepKeywordType_CONTEXT, + + "Etant donné ": messages.StepKeywordType_CONTEXT, + + "Etant donnée ": messages.StepKeywordType_CONTEXT, + + "Etant donnés ": messages.StepKeywordType_CONTEXT, + + "Etant données ": messages.StepKeywordType_CONTEXT, + + "Étant donné que ": 
messages.StepKeywordType_CONTEXT, + + "Étant donné qu'": messages.StepKeywordType_CONTEXT, + + "Étant donné ": messages.StepKeywordType_CONTEXT, + + "Étant donnée ": messages.StepKeywordType_CONTEXT, + + "Étant donnés ": messages.StepKeywordType_CONTEXT, + + "Étant données ": messages.StepKeywordType_CONTEXT, + + "Quand ": messages.StepKeywordType_ACTION, + + "Lorsque ": messages.StepKeywordType_ACTION, + + "Lorsqu'": messages.StepKeywordType_ACTION, + + "Alors ": messages.StepKeywordType_OUTCOME, + + "Donc ": messages.StepKeywordType_OUTCOME, + + "Et que ": messages.StepKeywordType_CONJUNCTION, + + "Et qu'": messages.StepKeywordType_CONJUNCTION, + + "Et ": messages.StepKeywordType_CONJUNCTION, + + "Mais que ": messages.StepKeywordType_CONJUNCTION, + + "Mais qu'": messages.StepKeywordType_CONJUNCTION, + + "Mais ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ga": &Dialect{ + "ga", "Irish", "Gaeilge", map[string][]string{ + feature: { + "Gné", + }, + rule: { + "Rule", + }, + background: { + "Cúlra", + }, + scenario: { + "Sampla", + "Cás", + }, + scenarioOutline: { + "Cás Achomair", + }, + examples: { + "Samplaí", + }, + given: { + "* ", + "Cuir i gcás go", + "Cuir i gcás nach", + "Cuir i gcás gur", + "Cuir i gcás nár", + }, + when: { + "* ", + "Nuair a", + "Nuair nach", + "Nuair ba", + "Nuair nár", + }, + then: { + "* ", + "Ansin", + }, + and: { + "* ", + "Agus", + }, + but: { + "* ", + "Ach", + }, + }, + map[string]messages.StepKeywordType{ + "Cuir i gcás go": messages.StepKeywordType_CONTEXT, + + "Cuir i gcás nach": messages.StepKeywordType_CONTEXT, + + "Cuir i gcás gur": messages.StepKeywordType_CONTEXT, + + "Cuir i gcás nár": messages.StepKeywordType_CONTEXT, + + "Nuair a": messages.StepKeywordType_ACTION, + + "Nuair nach": messages.StepKeywordType_ACTION, + + "Nuair ba": messages.StepKeywordType_ACTION, + + "Nuair nár": messages.StepKeywordType_ACTION, + + "Ansin": messages.StepKeywordType_OUTCOME, + + "Agus": messages.StepKeywordType_CONJUNCTION, + + "Ach": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "gj": &Dialect{ + "gj", "Gujarati", "ગુજરાતી", map[string][]string{ + feature: { + "લક્ષણ", + "વ્યાપાર જરૂર", + "ક્ષમતા", + }, + rule: { + "Rule", + }, + background: { + "બેકગ્રાઉન્ડ", + }, + scenario: { + "ઉદાહરણ", + "સ્થિતિ", + }, + scenarioOutline: { + "પરિદ્દશ્ય રૂપરેખા", + "પરિદ્દશ્ય ઢાંચો", + }, + examples: { + "ઉદાહરણો", + }, + given: { + "* ", + "આપેલ છે ", + }, + when: { + "* ", + "ક્યારે ", + }, + then: { + "* ", + "પછી ", + }, + and: { + "* ", + "અને ", + }, + but: { + "* ", + "પણ ", + }, + }, + map[string]messages.StepKeywordType{ + "આપેલ છે ": messages.StepKeywordType_CONTEXT, + + "ક્યારે ": messages.StepKeywordType_ACTION, + + "પછી ": messages.StepKeywordType_OUTCOME, + + "અને ": messages.StepKeywordType_CONJUNCTION, + + "પણ ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "gl": &Dialect{ + "gl", "Galician", "galego", map[string][]string{ + feature: { + "Característica", + }, + rule: { + "Rule", + }, + background: { + "Contexto", + }, + scenario: { + "Exemplo", + "Escenario", + }, + scenarioOutline: { + "Esbozo do escenario", + }, + examples: { + "Exemplos", + }, + given: { + "* ", + "Dado ", + "Dada ", + "Dados ", + "Dadas ", + }, + when: { + "* ", + "Cando ", + }, + then: { + "* ", + "Entón ", + "Logo ", + }, + and: { + "* ", + "E ", + }, + but: { + "* ", + "Mais ", + "Pero ", + }, + }, + map[string]messages.StepKeywordType{ + "Dado ": 
messages.StepKeywordType_CONTEXT, + + "Dada ": messages.StepKeywordType_CONTEXT, + + "Dados ": messages.StepKeywordType_CONTEXT, + + "Dadas ": messages.StepKeywordType_CONTEXT, + + "Cando ": messages.StepKeywordType_ACTION, + + "Entón ": messages.StepKeywordType_OUTCOME, + + "Logo ": messages.StepKeywordType_OUTCOME, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Mais ": messages.StepKeywordType_CONJUNCTION, + + "Pero ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "he": &Dialect{ + "he", "Hebrew", "עברית", map[string][]string{ + feature: { + "תכונה", + }, + rule: { + "כלל", + }, + background: { + "רקע", + }, + scenario: { + "דוגמא", + "תרחיש", + }, + scenarioOutline: { + "תבנית תרחיש", + }, + examples: { + "דוגמאות", + }, + given: { + "* ", + "בהינתן ", + }, + when: { + "* ", + "כאשר ", + }, + then: { + "* ", + "אז ", + "אזי ", + }, + and: { + "* ", + "וגם ", + }, + but: { + "* ", + "אבל ", + }, + }, + map[string]messages.StepKeywordType{ + "בהינתן ": messages.StepKeywordType_CONTEXT, + + "כאשר ": messages.StepKeywordType_ACTION, + + "אז ": messages.StepKeywordType_OUTCOME, + + "אזי ": messages.StepKeywordType_OUTCOME, + + "וגם ": messages.StepKeywordType_CONJUNCTION, + + "אבל ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "hi": &Dialect{ + "hi", "Hindi", "हिंदी", map[string][]string{ + feature: { + "रूप लेख", + }, + rule: { + "नियम", + }, + background: { + "पृष्ठभूमि", + }, + scenario: { + "परिदृश्य", + }, + scenarioOutline: { + "परिदृश्य रूपरेखा", + }, + examples: { + "उदाहरण", + }, + given: { + "* ", + "अगर ", + "यदि ", + "चूंकि ", + }, + when: { + "* ", + "जब ", + "कदा ", + }, + then: { + "* ", + "तब ", + "तदा ", + }, + and: { + "* ", + "और ", + "तथा ", + }, + but: { + "* ", + "पर ", + "परन्तु ", + "किन्तु ", + }, + }, + map[string]messages.StepKeywordType{ + "अगर ": messages.StepKeywordType_CONTEXT, + + "यदि ": messages.StepKeywordType_CONTEXT, + + "चूंकि ": messages.StepKeywordType_CONTEXT, + + "जब ": messages.StepKeywordType_ACTION, + + "कदा ": messages.StepKeywordType_ACTION, + + "तब ": messages.StepKeywordType_OUTCOME, + + "तदा ": messages.StepKeywordType_OUTCOME, + + "और ": messages.StepKeywordType_CONJUNCTION, + + "तथा ": messages.StepKeywordType_CONJUNCTION, + + "पर ": messages.StepKeywordType_CONJUNCTION, + + "परन्तु ": messages.StepKeywordType_CONJUNCTION, + + "किन्तु ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "hr": &Dialect{ + "hr", "Croatian", "hrvatski", map[string][]string{ + feature: { + "Osobina", + "Mogućnost", + "Mogucnost", + }, + rule: { + "Rule", + }, + background: { + "Pozadina", + }, + scenario: { + "Primjer", + "Scenarij", + }, + scenarioOutline: { + "Skica", + "Koncept", + }, + examples: { + "Primjeri", + "Scenariji", + }, + given: { + "* ", + "Zadan ", + "Zadani ", + "Zadano ", + "Ukoliko ", + }, + when: { + "* ", + "Kada ", + "Kad ", + }, + then: { + "* ", + "Onda ", + }, + and: { + "* ", + "I ", + }, + but: { + "* ", + "Ali ", + }, + }, + map[string]messages.StepKeywordType{ + "Zadan ": messages.StepKeywordType_CONTEXT, + + "Zadani ": messages.StepKeywordType_CONTEXT, + + "Zadano ": messages.StepKeywordType_CONTEXT, + + "Ukoliko ": messages.StepKeywordType_CONTEXT, + + "Kada ": messages.StepKeywordType_ACTION, + + "Kad ": messages.StepKeywordType_ACTION, + + "Onda ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "Ali ": messages.StepKeywordType_CONJUNCTION, + + "* ": 
messages.StepKeywordType_UNKNOWN, + }}, + "ht": &Dialect{ + "ht", "Creole", "kreyòl", map[string][]string{ + feature: { + "Karakteristik", + "Mak", + "Fonksyonalite", + }, + rule: { + "Rule", + }, + background: { + "Kontèks", + "Istorik", + }, + scenario: { + "Senaryo", + }, + scenarioOutline: { + "Plan senaryo", + "Plan Senaryo", + "Senaryo deskripsyon", + "Senaryo Deskripsyon", + "Dyagram senaryo", + "Dyagram Senaryo", + }, + examples: { + "Egzanp", + }, + given: { + "* ", + "Sipoze ", + "Sipoze ke ", + "Sipoze Ke ", + }, + when: { + "* ", + "Lè ", + "Le ", + }, + then: { + "* ", + "Lè sa a ", + "Le sa a ", + }, + and: { + "* ", + "Ak ", + "Epi ", + "E ", + }, + but: { + "* ", + "Men ", + }, + }, + map[string]messages.StepKeywordType{ + "Sipoze ": messages.StepKeywordType_CONTEXT, + + "Sipoze ke ": messages.StepKeywordType_CONTEXT, + + "Sipoze Ke ": messages.StepKeywordType_CONTEXT, + + "Lè ": messages.StepKeywordType_ACTION, + + "Le ": messages.StepKeywordType_ACTION, + + "Lè sa a ": messages.StepKeywordType_OUTCOME, + + "Le sa a ": messages.StepKeywordType_OUTCOME, + + "Ak ": messages.StepKeywordType_CONJUNCTION, + + "Epi ": messages.StepKeywordType_CONJUNCTION, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Men ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "hu": &Dialect{ + "hu", "Hungarian", "magyar", map[string][]string{ + feature: { + "Jellemző", + }, + rule: { + "Szabály", + }, + background: { + "Háttér", + }, + scenario: { + "Példa", + "Forgatókönyv", + }, + scenarioOutline: { + "Forgatókönyv vázlat", + }, + examples: { + "Példák", + }, + given: { + "* ", + "Amennyiben ", + "Adott ", + }, + when: { + "* ", + "Majd ", + "Ha ", + "Amikor ", + }, + then: { + "* ", + "Akkor ", + }, + and: { + "* ", + "És ", + }, + but: { + "* ", + "De ", + }, + }, + map[string]messages.StepKeywordType{ + "Amennyiben ": messages.StepKeywordType_CONTEXT, + + "Adott ": messages.StepKeywordType_CONTEXT, + + "Majd ": messages.StepKeywordType_ACTION, + + "Ha ": messages.StepKeywordType_ACTION, + + "Amikor ": messages.StepKeywordType_ACTION, + + "Akkor ": messages.StepKeywordType_OUTCOME, + + "És ": messages.StepKeywordType_CONJUNCTION, + + "De ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "id": &Dialect{ + "id", "Indonesian", "Bahasa Indonesia", map[string][]string{ + feature: { + "Fitur", + }, + rule: { + "Rule", + "Aturan", + }, + background: { + "Dasar", + "Latar Belakang", + }, + scenario: { + "Skenario", + }, + scenarioOutline: { + "Skenario konsep", + "Garis-Besar Skenario", + }, + examples: { + "Contoh", + "Misal", + }, + given: { + "* ", + "Dengan ", + "Diketahui ", + "Diasumsikan ", + "Bila ", + "Jika ", + }, + when: { + "* ", + "Ketika ", + }, + then: { + "* ", + "Maka ", + "Kemudian ", + }, + and: { + "* ", + "Dan ", + }, + but: { + "* ", + "Tapi ", + "Tetapi ", + }, + }, + map[string]messages.StepKeywordType{ + "Dengan ": messages.StepKeywordType_CONTEXT, + + "Diketahui ": messages.StepKeywordType_CONTEXT, + + "Diasumsikan ": messages.StepKeywordType_CONTEXT, + + "Bila ": messages.StepKeywordType_CONTEXT, + + "Jika ": messages.StepKeywordType_CONTEXT, + + "Ketika ": messages.StepKeywordType_ACTION, + + "Maka ": messages.StepKeywordType_OUTCOME, + + "Kemudian ": messages.StepKeywordType_OUTCOME, + + "Dan ": messages.StepKeywordType_CONJUNCTION, + + "Tapi ": messages.StepKeywordType_CONJUNCTION, + + "Tetapi ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + 
"is": &Dialect{ + "is", "Icelandic", "Íslenska", map[string][]string{ + feature: { + "Eiginleiki", + }, + rule: { + "Rule", + }, + background: { + "Bakgrunnur", + }, + scenario: { + "Atburðarás", + }, + scenarioOutline: { + "Lýsing Atburðarásar", + "Lýsing Dæma", + }, + examples: { + "Dæmi", + "Atburðarásir", + }, + given: { + "* ", + "Ef ", + }, + when: { + "* ", + "Þegar ", + }, + then: { + "* ", + "Þá ", + }, + and: { + "* ", + "Og ", + }, + but: { + "* ", + "En ", + }, + }, + map[string]messages.StepKeywordType{ + "Ef ": messages.StepKeywordType_CONTEXT, + + "Þegar ": messages.StepKeywordType_ACTION, + + "Þá ": messages.StepKeywordType_OUTCOME, + + "Og ": messages.StepKeywordType_CONJUNCTION, + + "En ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "it": &Dialect{ + "it", "Italian", "italiano", map[string][]string{ + feature: { + "Funzionalità", + "Esigenza di Business", + "Abilità", + }, + rule: { + "Regola", + }, + background: { + "Contesto", + }, + scenario: { + "Esempio", + "Scenario", + }, + scenarioOutline: { + "Schema dello scenario", + }, + examples: { + "Esempi", + }, + given: { + "* ", + "Dato ", + "Data ", + "Dati ", + "Date ", + }, + when: { + "* ", + "Quando ", + }, + then: { + "* ", + "Allora ", + }, + and: { + "* ", + "E ", + }, + but: { + "* ", + "Ma ", + }, + }, + map[string]messages.StepKeywordType{ + "Dato ": messages.StepKeywordType_CONTEXT, + + "Data ": messages.StepKeywordType_CONTEXT, + + "Dati ": messages.StepKeywordType_CONTEXT, + + "Date ": messages.StepKeywordType_CONTEXT, + + "Quando ": messages.StepKeywordType_ACTION, + + "Allora ": messages.StepKeywordType_OUTCOME, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Ma ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ja": &Dialect{ + "ja", "Japanese", "日本語", map[string][]string{ + feature: { + "フィーチャ", + "機能", + }, + rule: { + "ルール", + }, + background: { + "背景", + }, + scenario: { + "シナリオ", + }, + scenarioOutline: { + "シナリオアウトライン", + "シナリオテンプレート", + "テンプレ", + "シナリオテンプレ", + }, + examples: { + "例", + "サンプル", + }, + given: { + "* ", + "前提", + }, + when: { + "* ", + "もし", + }, + then: { + "* ", + "ならば", + }, + and: { + "* ", + "且つ", + "かつ", + }, + but: { + "* ", + "然し", + "しかし", + "但し", + "ただし", + }, + }, + map[string]messages.StepKeywordType{ + "前提": messages.StepKeywordType_CONTEXT, + + "もし": messages.StepKeywordType_ACTION, + + "ならば": messages.StepKeywordType_OUTCOME, + + "且つ": messages.StepKeywordType_CONJUNCTION, + + "かつ": messages.StepKeywordType_CONJUNCTION, + + "然し": messages.StepKeywordType_CONJUNCTION, + + "しかし": messages.StepKeywordType_CONJUNCTION, + + "但し": messages.StepKeywordType_CONJUNCTION, + + "ただし": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "jv": &Dialect{ + "jv", "Javanese", "Basa Jawa", map[string][]string{ + feature: { + "Fitur", + }, + rule: { + "Rule", + }, + background: { + "Dasar", + }, + scenario: { + "Skenario", + }, + scenarioOutline: { + "Konsep skenario", + }, + examples: { + "Conto", + "Contone", + }, + given: { + "* ", + "Nalika ", + "Nalikaning ", + }, + when: { + "* ", + "Manawa ", + "Menawa ", + }, + then: { + "* ", + "Njuk ", + "Banjur ", + }, + and: { + "* ", + "Lan ", + }, + but: { + "* ", + "Tapi ", + "Nanging ", + "Ananging ", + }, + }, + map[string]messages.StepKeywordType{ + "Nalika ": messages.StepKeywordType_CONTEXT, + + "Nalikaning ": messages.StepKeywordType_CONTEXT, + + "Manawa ": messages.StepKeywordType_ACTION, + + "Menawa ": 
messages.StepKeywordType_ACTION, + + "Njuk ": messages.StepKeywordType_OUTCOME, + + "Banjur ": messages.StepKeywordType_OUTCOME, + + "Lan ": messages.StepKeywordType_CONJUNCTION, + + "Tapi ": messages.StepKeywordType_CONJUNCTION, + + "Nanging ": messages.StepKeywordType_CONJUNCTION, + + "Ananging ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ka": &Dialect{ + "ka", "Georgian", "ქართული", map[string][]string{ + feature: { + "თვისება", + "მოთხოვნა", + }, + rule: { + "წესი", + }, + background: { + "კონტექსტი", + }, + scenario: { + "მაგალითად", + "მაგალითი", + "მაგ", + "სცენარი", + }, + scenarioOutline: { + "სცენარის ნიმუში", + "სცენარის შაბლონი", + "ნიმუში", + "შაბლონი", + }, + examples: { + "მაგალითები", + }, + given: { + "* ", + "მოცემული ", + "მოცემულია ", + "ვთქვათ ", + }, + when: { + "* ", + "როდესაც ", + "როცა ", + "როგორც კი ", + "თუ ", + }, + then: { + "* ", + "მაშინ ", + }, + and: { + "* ", + "და ", + "ასევე ", + }, + but: { + "* ", + "მაგრამ ", + "თუმცა ", + }, + }, + map[string]messages.StepKeywordType{ + "მოცემული ": messages.StepKeywordType_CONTEXT, + + "მოცემულია ": messages.StepKeywordType_CONTEXT, + + "ვთქვათ ": messages.StepKeywordType_CONTEXT, + + "როდესაც ": messages.StepKeywordType_ACTION, + + "როცა ": messages.StepKeywordType_ACTION, + + "როგორც კი ": messages.StepKeywordType_ACTION, + + "თუ ": messages.StepKeywordType_ACTION, + + "მაშინ ": messages.StepKeywordType_OUTCOME, + + "და ": messages.StepKeywordType_CONJUNCTION, + + "ასევე ": messages.StepKeywordType_CONJUNCTION, + + "მაგრამ ": messages.StepKeywordType_CONJUNCTION, + + "თუმცა ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "kn": &Dialect{ + "kn", "Kannada", "ಕನ್ನಡ", map[string][]string{ + feature: { + "ಹೆಚ್ಚಳ", + }, + rule: { + "Rule", + }, + background: { + "ಹಿನ್ನೆಲೆ", + }, + scenario: { + "ಉದಾಹರಣೆ", + "ಕಥಾಸಾರಾಂಶ", + }, + scenarioOutline: { + "ವಿವರಣೆ", + }, + examples: { + "ಉದಾಹರಣೆಗಳು", + }, + given: { + "* ", + "ನೀಡಿದ ", + }, + when: { + "* ", + "ಸ್ಥಿತಿಯನ್ನು ", + }, + then: { + "* ", + "ನಂತರ ", + }, + and: { + "* ", + "ಮತ್ತು ", + }, + but: { + "* ", + "ಆದರೆ ", + }, + }, + map[string]messages.StepKeywordType{ + "ನೀಡಿದ ": messages.StepKeywordType_CONTEXT, + + "ಸ್ಥಿತಿಯನ್ನು ": messages.StepKeywordType_ACTION, + + "ನಂತರ ": messages.StepKeywordType_OUTCOME, + + "ಮತ್ತು ": messages.StepKeywordType_CONJUNCTION, + + "ಆದರೆ ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ko": &Dialect{ + "ko", "Korean", "한국어", map[string][]string{ + feature: { + "기능", + }, + rule: { + "Rule", + }, + background: { + "배경", + }, + scenario: { + "시나리오", + }, + scenarioOutline: { + "시나리오 개요", + }, + examples: { + "예", + }, + given: { + "* ", + "조건", + "먼저", + }, + when: { + "* ", + "만일", + "만약", + }, + then: { + "* ", + "그러면", + }, + and: { + "* ", + "그리고", + }, + but: { + "* ", + "하지만", + "단", + }, + }, + map[string]messages.StepKeywordType{ + "조건": messages.StepKeywordType_CONTEXT, + + "먼저": messages.StepKeywordType_CONTEXT, + + "만일": messages.StepKeywordType_ACTION, + + "만약": messages.StepKeywordType_ACTION, + + "그러면": messages.StepKeywordType_OUTCOME, + + "그리고": messages.StepKeywordType_CONJUNCTION, + + "하지만": messages.StepKeywordType_CONJUNCTION, + + "단": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "lt": &Dialect{ + "lt", "Lithuanian", "lietuvių kalba", map[string][]string{ + feature: { + "Savybė", + }, + rule: { + "Rule", + }, + background: { + 
"Kontekstas", + }, + scenario: { + "Pavyzdys", + "Scenarijus", + }, + scenarioOutline: { + "Scenarijaus šablonas", + }, + examples: { + "Pavyzdžiai", + "Scenarijai", + "Variantai", + }, + given: { + "* ", + "Duota ", + }, + when: { + "* ", + "Kai ", + }, + then: { + "* ", + "Tada ", + }, + and: { + "* ", + "Ir ", + }, + but: { + "* ", + "Bet ", + }, + }, + map[string]messages.StepKeywordType{ + "Duota ": messages.StepKeywordType_CONTEXT, + + "Kai ": messages.StepKeywordType_ACTION, + + "Tada ": messages.StepKeywordType_OUTCOME, + + "Ir ": messages.StepKeywordType_CONJUNCTION, + + "Bet ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "lu": &Dialect{ + "lu", "Luxemburgish", "Lëtzebuergesch", map[string][]string{ + feature: { + "Funktionalitéit", + }, + rule: { + "Rule", + }, + background: { + "Hannergrond", + }, + scenario: { + "Beispill", + "Szenario", + }, + scenarioOutline: { + "Plang vum Szenario", + }, + examples: { + "Beispiller", + }, + given: { + "* ", + "ugeholl ", + }, + when: { + "* ", + "wann ", + }, + then: { + "* ", + "dann ", + }, + and: { + "* ", + "an ", + "a ", + }, + but: { + "* ", + "awer ", + "mä ", + }, + }, + map[string]messages.StepKeywordType{ + "ugeholl ": messages.StepKeywordType_CONTEXT, + + "wann ": messages.StepKeywordType_ACTION, + + "dann ": messages.StepKeywordType_OUTCOME, + + "an ": messages.StepKeywordType_CONJUNCTION, + + "a ": messages.StepKeywordType_CONJUNCTION, + + "awer ": messages.StepKeywordType_CONJUNCTION, + + "mä ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "lv": &Dialect{ + "lv", "Latvian", "latviešu", map[string][]string{ + feature: { + "Funkcionalitāte", + "Fīča", + }, + rule: { + "Rule", + }, + background: { + "Konteksts", + "Situācija", + }, + scenario: { + "Piemērs", + "Scenārijs", + }, + scenarioOutline: { + "Scenārijs pēc parauga", + }, + examples: { + "Piemēri", + "Paraugs", + }, + given: { + "* ", + "Kad ", + }, + when: { + "* ", + "Ja ", + }, + then: { + "* ", + "Tad ", + }, + and: { + "* ", + "Un ", + }, + but: { + "* ", + "Bet ", + }, + }, + map[string]messages.StepKeywordType{ + "Kad ": messages.StepKeywordType_CONTEXT, + + "Ja ": messages.StepKeywordType_ACTION, + + "Tad ": messages.StepKeywordType_OUTCOME, + + "Un ": messages.StepKeywordType_CONJUNCTION, + + "Bet ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "mk-Cyrl": &Dialect{ + "mk-Cyrl", "Macedonian", "Македонски", map[string][]string{ + feature: { + "Функционалност", + "Бизнис потреба", + "Можност", + }, + rule: { + "Rule", + }, + background: { + "Контекст", + "Содржина", + }, + scenario: { + "Пример", + "Сценарио", + "На пример", + }, + scenarioOutline: { + "Преглед на сценарија", + "Скица", + "Концепт", + }, + examples: { + "Примери", + "Сценарија", + }, + given: { + "* ", + "Дадено ", + "Дадена ", + }, + when: { + "* ", + "Кога ", + }, + then: { + "* ", + "Тогаш ", + }, + and: { + "* ", + "И ", + }, + but: { + "* ", + "Но ", + }, + }, + map[string]messages.StepKeywordType{ + "Дадено ": messages.StepKeywordType_CONTEXT, + + "Дадена ": messages.StepKeywordType_CONTEXT, + + "Кога ": messages.StepKeywordType_ACTION, + + "Тогаш ": messages.StepKeywordType_OUTCOME, + + "И ": messages.StepKeywordType_CONJUNCTION, + + "Но ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "mk-Latn": &Dialect{ + "mk-Latn", "Macedonian (Latin)", "Makedonski (Latinica)", map[string][]string{ + feature: { + 
"Funkcionalnost", + "Biznis potreba", + "Mozhnost", + }, + rule: { + "Rule", + }, + background: { + "Kontekst", + "Sodrzhina", + }, + scenario: { + "Scenario", + "Na primer", + }, + scenarioOutline: { + "Pregled na scenarija", + "Skica", + "Koncept", + }, + examples: { + "Primeri", + "Scenaria", + }, + given: { + "* ", + "Dadeno ", + "Dadena ", + }, + when: { + "* ", + "Koga ", + }, + then: { + "* ", + "Togash ", + }, + and: { + "* ", + "I ", + }, + but: { + "* ", + "No ", + }, + }, + map[string]messages.StepKeywordType{ + "Dadeno ": messages.StepKeywordType_CONTEXT, + + "Dadena ": messages.StepKeywordType_CONTEXT, + + "Koga ": messages.StepKeywordType_ACTION, + + "Togash ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "No ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "mn": &Dialect{ + "mn", "Mongolian", "монгол", map[string][]string{ + feature: { + "Функц", + "Функционал", + }, + rule: { + "Rule", + }, + background: { + "Агуулга", + }, + scenario: { + "Сценар", + }, + scenarioOutline: { + "Сценарын төлөвлөгөө", + }, + examples: { + "Тухайлбал", + }, + given: { + "* ", + "Өгөгдсөн нь ", + "Анх ", + }, + when: { + "* ", + "Хэрэв ", + }, + then: { + "* ", + "Тэгэхэд ", + "Үүний дараа ", + }, + and: { + "* ", + "Мөн ", + "Тэгээд ", + }, + but: { + "* ", + "Гэхдээ ", + "Харин ", + }, + }, + map[string]messages.StepKeywordType{ + "Өгөгдсөн нь ": messages.StepKeywordType_CONTEXT, + + "Анх ": messages.StepKeywordType_CONTEXT, + + "Хэрэв ": messages.StepKeywordType_ACTION, + + "Тэгэхэд ": messages.StepKeywordType_OUTCOME, + + "Үүний дараа ": messages.StepKeywordType_OUTCOME, + + "Мөн ": messages.StepKeywordType_CONJUNCTION, + + "Тэгээд ": messages.StepKeywordType_CONJUNCTION, + + "Гэхдээ ": messages.StepKeywordType_CONJUNCTION, + + "Харин ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ne": &Dialect{ + "ne", "Nepali", "नेपाली", map[string][]string{ + feature: { + "सुविधा", + "विशेषता", + }, + rule: { + "नियम", + }, + background: { + "पृष्ठभूमी", + }, + scenario: { + "परिदृश्य", + }, + scenarioOutline: { + "परिदृश्य रूपरेखा", + }, + examples: { + "उदाहरण", + "उदाहरणहरु", + }, + given: { + "* ", + "दिइएको ", + "दिएको ", + "यदि ", + }, + when: { + "* ", + "जब ", + }, + then: { + "* ", + "त्यसपछि ", + "अनी ", + }, + and: { + "* ", + "र ", + "अनि ", + }, + but: { + "* ", + "तर ", + }, + }, + map[string]messages.StepKeywordType{ + "दिइएको ": messages.StepKeywordType_CONTEXT, + + "दिएको ": messages.StepKeywordType_CONTEXT, + + "यदि ": messages.StepKeywordType_CONTEXT, + + "जब ": messages.StepKeywordType_ACTION, + + "त्यसपछि ": messages.StepKeywordType_OUTCOME, + + "अनी ": messages.StepKeywordType_OUTCOME, + + "र ": messages.StepKeywordType_CONJUNCTION, + + "अनि ": messages.StepKeywordType_CONJUNCTION, + + "तर ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "nl": &Dialect{ + "nl", "Dutch", "Nederlands", map[string][]string{ + feature: { + "Functionaliteit", + }, + rule: { + "Rule", + }, + background: { + "Achtergrond", + }, + scenario: { + "Voorbeeld", + "Scenario", + }, + scenarioOutline: { + "Abstract Scenario", + }, + examples: { + "Voorbeelden", + }, + given: { + "* ", + "Gegeven ", + "Stel ", + }, + when: { + "* ", + "Als ", + "Wanneer ", + }, + then: { + "* ", + "Dan ", + }, + and: { + "* ", + "En ", + }, + but: { + "* ", + "Maar ", + }, + }, + map[string]messages.StepKeywordType{ + "Gegeven ": 
messages.StepKeywordType_CONTEXT, + + "Stel ": messages.StepKeywordType_CONTEXT, + + "Als ": messages.StepKeywordType_ACTION, + + "Wanneer ": messages.StepKeywordType_ACTION, + + "Dan ": messages.StepKeywordType_OUTCOME, + + "En ": messages.StepKeywordType_CONJUNCTION, + + "Maar ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "no": &Dialect{ + "no", "Norwegian", "norsk", map[string][]string{ + feature: { + "Egenskap", + }, + rule: { + "Regel", + }, + background: { + "Bakgrunn", + }, + scenario: { + "Eksempel", + "Scenario", + }, + scenarioOutline: { + "Scenariomal", + "Abstrakt Scenario", + }, + examples: { + "Eksempler", + }, + given: { + "* ", + "Gitt ", + }, + when: { + "* ", + "Når ", + }, + then: { + "* ", + "Så ", + }, + and: { + "* ", + "Og ", + }, + but: { + "* ", + "Men ", + }, + }, + map[string]messages.StepKeywordType{ + "Gitt ": messages.StepKeywordType_CONTEXT, + + "Når ": messages.StepKeywordType_ACTION, + + "Så ": messages.StepKeywordType_OUTCOME, + + "Og ": messages.StepKeywordType_CONJUNCTION, + + "Men ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "pa": &Dialect{ + "pa", "Panjabi", "ਪੰਜਾਬੀ", map[string][]string{ + feature: { + "ਖਾਸੀਅਤ", + "ਮੁਹਾਂਦਰਾ", + "ਨਕਸ਼ ਨੁਹਾਰ", + }, + rule: { + "Rule", + }, + background: { + "ਪਿਛੋਕੜ", + }, + scenario: { + "ਉਦਾਹਰਨ", + "ਪਟਕਥਾ", + }, + scenarioOutline: { + "ਪਟਕਥਾ ਢਾਂਚਾ", + "ਪਟਕਥਾ ਰੂਪ ਰੇਖਾ", + }, + examples: { + "ਉਦਾਹਰਨਾਂ", + }, + given: { + "* ", + "ਜੇਕਰ ", + "ਜਿਵੇਂ ਕਿ ", + }, + when: { + "* ", + "ਜਦੋਂ ", + }, + then: { + "* ", + "ਤਦ ", + }, + and: { + "* ", + "ਅਤੇ ", + }, + but: { + "* ", + "ਪਰ ", + }, + }, + map[string]messages.StepKeywordType{ + "ਜੇਕਰ ": messages.StepKeywordType_CONTEXT, + + "ਜਿਵੇਂ ਕਿ ": messages.StepKeywordType_CONTEXT, + + "ਜਦੋਂ ": messages.StepKeywordType_ACTION, + + "ਤਦ ": messages.StepKeywordType_OUTCOME, + + "ਅਤੇ ": messages.StepKeywordType_CONJUNCTION, + + "ਪਰ ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "pl": &Dialect{ + "pl", "Polish", "polski", map[string][]string{ + feature: { + "Właściwość", + "Funkcja", + "Aspekt", + "Potrzeba biznesowa", + }, + rule: { + "Zasada", + "Reguła", + }, + background: { + "Założenia", + }, + scenario: { + "Przykład", + "Scenariusz", + }, + scenarioOutline: { + "Szablon scenariusza", + }, + examples: { + "Przykłady", + }, + given: { + "* ", + "Zakładając ", + "Mając ", + "Zakładając, że ", + }, + when: { + "* ", + "Jeżeli ", + "Jeśli ", + "Gdy ", + "Kiedy ", + }, + then: { + "* ", + "Wtedy ", + }, + and: { + "* ", + "Oraz ", + "I ", + }, + but: { + "* ", + "Ale ", + }, + }, + map[string]messages.StepKeywordType{ + "Zakładając ": messages.StepKeywordType_CONTEXT, + + "Mając ": messages.StepKeywordType_CONTEXT, + + "Zakładając, że ": messages.StepKeywordType_CONTEXT, + + "Jeżeli ": messages.StepKeywordType_ACTION, + + "Jeśli ": messages.StepKeywordType_ACTION, + + "Gdy ": messages.StepKeywordType_ACTION, + + "Kiedy ": messages.StepKeywordType_ACTION, + + "Wtedy ": messages.StepKeywordType_OUTCOME, + + "Oraz ": messages.StepKeywordType_CONJUNCTION, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "Ale ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "pt": &Dialect{ + "pt", "Portuguese", "português", map[string][]string{ + feature: { + "Funcionalidade", + "Característica", + "Caracteristica", + }, + rule: { + "Regra", + }, + background: { + "Contexto", + "Cenário de Fundo", + "Cenario de Fundo", + 
"Fundo", + }, + scenario: { + "Exemplo", + "Cenário", + "Cenario", + }, + scenarioOutline: { + "Esquema do Cenário", + "Esquema do Cenario", + "Delineação do Cenário", + "Delineacao do Cenario", + }, + examples: { + "Exemplos", + "Cenários", + "Cenarios", + }, + given: { + "* ", + "Dado ", + "Dada ", + "Dados ", + "Dadas ", + }, + when: { + "* ", + "Quando ", + }, + then: { + "* ", + "Então ", + "Entao ", + }, + and: { + "* ", + "E ", + }, + but: { + "* ", + "Mas ", + }, + }, + map[string]messages.StepKeywordType{ + "Dado ": messages.StepKeywordType_CONTEXT, + + "Dada ": messages.StepKeywordType_CONTEXT, + + "Dados ": messages.StepKeywordType_CONTEXT, + + "Dadas ": messages.StepKeywordType_CONTEXT, + + "Quando ": messages.StepKeywordType_ACTION, + + "Então ": messages.StepKeywordType_OUTCOME, + + "Entao ": messages.StepKeywordType_OUTCOME, + + "E ": messages.StepKeywordType_CONJUNCTION, + + "Mas ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ro": &Dialect{ + "ro", "Romanian", "română", map[string][]string{ + feature: { + "Functionalitate", + "Funcționalitate", + "Funcţionalitate", + }, + rule: { + "Rule", + }, + background: { + "Context", + }, + scenario: { + "Exemplu", + "Scenariu", + }, + scenarioOutline: { + "Structura scenariu", + "Structură scenariu", + }, + examples: { + "Exemple", + }, + given: { + "* ", + "Date fiind ", + "Dat fiind ", + "Dată fiind", + "Dati fiind ", + "Dați fiind ", + "Daţi fiind ", + }, + when: { + "* ", + "Cand ", + "Când ", + }, + then: { + "* ", + "Atunci ", + }, + and: { + "* ", + "Si ", + "Și ", + "Şi ", + }, + but: { + "* ", + "Dar ", + }, + }, + map[string]messages.StepKeywordType{ + "Date fiind ": messages.StepKeywordType_CONTEXT, + + "Dat fiind ": messages.StepKeywordType_CONTEXT, + + "Dată fiind": messages.StepKeywordType_CONTEXT, + + "Dati fiind ": messages.StepKeywordType_CONTEXT, + + "Dați fiind ": messages.StepKeywordType_CONTEXT, + + "Daţi fiind ": messages.StepKeywordType_CONTEXT, + + "Cand ": messages.StepKeywordType_ACTION, + + "Când ": messages.StepKeywordType_ACTION, + + "Atunci ": messages.StepKeywordType_OUTCOME, + + "Si ": messages.StepKeywordType_CONJUNCTION, + + "Și ": messages.StepKeywordType_CONJUNCTION, + + "Şi ": messages.StepKeywordType_CONJUNCTION, + + "Dar ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ru": &Dialect{ + "ru", "Russian", "русский", map[string][]string{ + feature: { + "Функция", + "Функциональность", + "Функционал", + "Свойство", + "Фича", + }, + rule: { + "Правило", + }, + background: { + "Предыстория", + "Контекст", + }, + scenario: { + "Пример", + "Сценарий", + }, + scenarioOutline: { + "Структура сценария", + "Шаблон сценария", + }, + examples: { + "Примеры", + }, + given: { + "* ", + "Допустим ", + "Дано ", + "Пусть ", + }, + when: { + "* ", + "Когда ", + "Если ", + }, + then: { + "* ", + "То ", + "Затем ", + "Тогда ", + }, + and: { + "* ", + "И ", + "К тому же ", + "Также ", + }, + but: { + "* ", + "Но ", + "А ", + "Иначе ", + }, + }, + map[string]messages.StepKeywordType{ + "Допустим ": messages.StepKeywordType_CONTEXT, + + "Дано ": messages.StepKeywordType_CONTEXT, + + "Пусть ": messages.StepKeywordType_CONTEXT, + + "Когда ": messages.StepKeywordType_ACTION, + + "Если ": messages.StepKeywordType_ACTION, + + "То ": messages.StepKeywordType_OUTCOME, + + "Затем ": messages.StepKeywordType_OUTCOME, + + "Тогда ": messages.StepKeywordType_OUTCOME, + + "И ": messages.StepKeywordType_CONJUNCTION, + + "К тому же ": 
messages.StepKeywordType_CONJUNCTION, + + "Также ": messages.StepKeywordType_CONJUNCTION, + + "Но ": messages.StepKeywordType_CONJUNCTION, + + "А ": messages.StepKeywordType_CONJUNCTION, + + "Иначе ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "sk": &Dialect{ + "sk", "Slovak", "Slovensky", map[string][]string{ + feature: { + "Požiadavka", + "Funkcia", + "Vlastnosť", + }, + rule: { + "Rule", + }, + background: { + "Pozadie", + }, + scenario: { + "Príklad", + "Scenár", + }, + scenarioOutline: { + "Náčrt Scenáru", + "Náčrt Scenára", + "Osnova Scenára", + }, + examples: { + "Príklady", + }, + given: { + "* ", + "Pokiaľ ", + "Za predpokladu ", + }, + when: { + "* ", + "Keď ", + "Ak ", + }, + then: { + "* ", + "Tak ", + "Potom ", + }, + and: { + "* ", + "A ", + "A tiež ", + "A taktiež ", + "A zároveň ", + }, + but: { + "* ", + "Ale ", + }, + }, + map[string]messages.StepKeywordType{ + "Pokiaľ ": messages.StepKeywordType_CONTEXT, + + "Za predpokladu ": messages.StepKeywordType_CONTEXT, + + "Keď ": messages.StepKeywordType_ACTION, + + "Ak ": messages.StepKeywordType_ACTION, + + "Tak ": messages.StepKeywordType_OUTCOME, + + "Potom ": messages.StepKeywordType_OUTCOME, + + "A ": messages.StepKeywordType_CONJUNCTION, + + "A tiež ": messages.StepKeywordType_CONJUNCTION, + + "A taktiež ": messages.StepKeywordType_CONJUNCTION, + + "A zároveň ": messages.StepKeywordType_CONJUNCTION, + + "Ale ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "sl": &Dialect{ + "sl", "Slovenian", "Slovenski", map[string][]string{ + feature: { + "Funkcionalnost", + "Funkcija", + "Možnosti", + "Moznosti", + "Lastnost", + "Značilnost", + }, + rule: { + "Rule", + }, + background: { + "Kontekst", + "Osnova", + "Ozadje", + }, + scenario: { + "Primer", + "Scenarij", + }, + scenarioOutline: { + "Struktura scenarija", + "Skica", + "Koncept", + "Oris scenarija", + "Osnutek", + }, + examples: { + "Primeri", + "Scenariji", + }, + given: { + "Dano ", + "Podano ", + "Zaradi ", + "Privzeto ", + }, + when: { + "Ko ", + "Ce ", + "Če ", + "Kadar ", + }, + then: { + "Nato ", + "Potem ", + "Takrat ", + }, + and: { + "In ", + "Ter ", + }, + but: { + "Toda ", + "Ampak ", + "Vendar ", + }, + }, + map[string]messages.StepKeywordType{ + "Dano ": messages.StepKeywordType_CONTEXT, + + "Podano ": messages.StepKeywordType_CONTEXT, + + "Zaradi ": messages.StepKeywordType_CONTEXT, + + "Privzeto ": messages.StepKeywordType_CONTEXT, + + "Ko ": messages.StepKeywordType_ACTION, + + "Ce ": messages.StepKeywordType_ACTION, + + "Če ": messages.StepKeywordType_ACTION, + + "Kadar ": messages.StepKeywordType_ACTION, + + "Nato ": messages.StepKeywordType_OUTCOME, + + "Potem ": messages.StepKeywordType_OUTCOME, + + "Takrat ": messages.StepKeywordType_OUTCOME, + + "In ": messages.StepKeywordType_CONJUNCTION, + + "Ter ": messages.StepKeywordType_CONJUNCTION, + + "Toda ": messages.StepKeywordType_CONJUNCTION, + + "Ampak ": messages.StepKeywordType_CONJUNCTION, + + "Vendar ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "sr-Cyrl": &Dialect{ + "sr-Cyrl", "Serbian", "Српски", map[string][]string{ + feature: { + "Функционалност", + "Могућност", + "Особина", + }, + rule: { + "Правило", + }, + background: { + "Контекст", + "Основа", + "Позадина", + }, + scenario: { + "Пример", + "Сценарио", + "Пример", + }, + scenarioOutline: { + "Структура сценарија", + "Скица", + "Концепт", + }, + examples: { + "Примери", + "Сценарији", + }, + given: { + "* 
", + "За дато ", + "За дате ", + "За дати ", + }, + when: { + "* ", + "Када ", + "Кад ", + }, + then: { + "* ", + "Онда ", + }, + and: { + "* ", + "И ", + }, + but: { + "* ", + "Али ", + }, + }, + map[string]messages.StepKeywordType{ + "За дато ": messages.StepKeywordType_CONTEXT, + + "За дате ": messages.StepKeywordType_CONTEXT, + + "За дати ": messages.StepKeywordType_CONTEXT, + + "Када ": messages.StepKeywordType_ACTION, + + "Кад ": messages.StepKeywordType_ACTION, + + "Онда ": messages.StepKeywordType_OUTCOME, + + "И ": messages.StepKeywordType_CONJUNCTION, + + "Али ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "sr-Latn": &Dialect{ + "sr-Latn", "Serbian (Latin)", "Srpski (Latinica)", map[string][]string{ + feature: { + "Funkcionalnost", + "Mogućnost", + "Mogucnost", + "Osobina", + }, + rule: { + "Pravilo", + }, + background: { + "Kontekst", + "Osnova", + "Pozadina", + }, + scenario: { + "Scenario", + "Primer", + }, + scenarioOutline: { + "Struktura scenarija", + "Skica", + "Koncept", + }, + examples: { + "Primeri", + "Scenariji", + }, + given: { + "* ", + "Za dato ", + "Za date ", + "Za dati ", + }, + when: { + "* ", + "Kada ", + "Kad ", + }, + then: { + "* ", + "Onda ", + }, + and: { + "* ", + "I ", + }, + but: { + "* ", + "Ali ", + }, + }, + map[string]messages.StepKeywordType{ + "Za dato ": messages.StepKeywordType_CONTEXT, + + "Za date ": messages.StepKeywordType_CONTEXT, + + "Za dati ": messages.StepKeywordType_CONTEXT, + + "Kada ": messages.StepKeywordType_ACTION, + + "Kad ": messages.StepKeywordType_ACTION, + + "Onda ": messages.StepKeywordType_OUTCOME, + + "I ": messages.StepKeywordType_CONJUNCTION, + + "Ali ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "sv": &Dialect{ + "sv", "Swedish", "Svenska", map[string][]string{ + feature: { + "Egenskap", + }, + rule: { + "Regel", + }, + background: { + "Bakgrund", + }, + scenario: { + "Scenario", + }, + scenarioOutline: { + "Abstrakt Scenario", + "Scenariomall", + }, + examples: { + "Exempel", + }, + given: { + "* ", + "Givet ", + }, + when: { + "* ", + "När ", + }, + then: { + "* ", + "Så ", + }, + and: { + "* ", + "Och ", + }, + but: { + "* ", + "Men ", + }, + }, + map[string]messages.StepKeywordType{ + "Givet ": messages.StepKeywordType_CONTEXT, + + "När ": messages.StepKeywordType_ACTION, + + "Så ": messages.StepKeywordType_OUTCOME, + + "Och ": messages.StepKeywordType_CONJUNCTION, + + "Men ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ta": &Dialect{ + "ta", "Tamil", "தமிழ்", map[string][]string{ + feature: { + "அம்சம்", + "வணிக தேவை", + "திறன்", + }, + rule: { + "Rule", + }, + background: { + "பின்னணி", + }, + scenario: { + "உதாரணமாக", + "காட்சி", + }, + scenarioOutline: { + "காட்சி சுருக்கம்", + "காட்சி வார்ப்புரு", + }, + examples: { + "எடுத்துக்காட்டுகள்", + "காட்சிகள்", + "நிலைமைகளில்", + }, + given: { + "* ", + "கொடுக்கப்பட்ட ", + }, + when: { + "* ", + "எப்போது ", + }, + then: { + "* ", + "அப்பொழுது ", + }, + and: { + "* ", + "மேலும் ", + "மற்றும் ", + }, + but: { + "* ", + "ஆனால் ", + }, + }, + map[string]messages.StepKeywordType{ + "கொடுக்கப்பட்ட ": messages.StepKeywordType_CONTEXT, + + "எப்போது ": messages.StepKeywordType_ACTION, + + "அப்பொழுது ": messages.StepKeywordType_OUTCOME, + + "மேலும் ": messages.StepKeywordType_CONJUNCTION, + + "மற்றும் ": messages.StepKeywordType_CONJUNCTION, + + "ஆனால் ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + 
}}, + "th": &Dialect{ + "th", "Thai", "ไทย", map[string][]string{ + feature: { + "โครงหลัก", + "ความต้องการทางธุรกิจ", + "ความสามารถ", + }, + rule: { + "Rule", + }, + background: { + "แนวคิด", + }, + scenario: { + "เหตุการณ์", + }, + scenarioOutline: { + "สรุปเหตุการณ์", + "โครงสร้างของเหตุการณ์", + }, + examples: { + "ชุดของตัวอย่าง", + "ชุดของเหตุการณ์", + }, + given: { + "* ", + "กำหนดให้ ", + }, + when: { + "* ", + "เมื่อ ", + }, + then: { + "* ", + "ดังนั้น ", + }, + and: { + "* ", + "และ ", + }, + but: { + "* ", + "แต่ ", + }, + }, + map[string]messages.StepKeywordType{ + "กำหนดให้ ": messages.StepKeywordType_CONTEXT, + + "เมื่อ ": messages.StepKeywordType_ACTION, + + "ดังนั้น ": messages.StepKeywordType_OUTCOME, + + "และ ": messages.StepKeywordType_CONJUNCTION, + + "แต่ ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "te": &Dialect{ + "te", "Telugu", "తెలుగు", map[string][]string{ + feature: { + "గుణము", + }, + rule: { + "Rule", + }, + background: { + "నేపథ్యం", + }, + scenario: { + "ఉదాహరణ", + "సన్నివేశం", + }, + scenarioOutline: { + "కథనం", + }, + examples: { + "ఉదాహరణలు", + }, + given: { + "* ", + "చెప్పబడినది ", + }, + when: { + "* ", + "ఈ పరిస్థితిలో ", + }, + then: { + "* ", + "అప్పుడు ", + }, + and: { + "* ", + "మరియు ", + }, + but: { + "* ", + "కాని ", + }, + }, + map[string]messages.StepKeywordType{ + "చెప్పబడినది ": messages.StepKeywordType_CONTEXT, + + "ఈ పరిస్థితిలో ": messages.StepKeywordType_ACTION, + + "అప్పుడు ": messages.StepKeywordType_OUTCOME, + + "మరియు ": messages.StepKeywordType_CONJUNCTION, + + "కాని ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "tlh": &Dialect{ + "tlh", "Klingon", "tlhIngan", map[string][]string{ + feature: { + "Qap", + "Qu'meH 'ut", + "perbogh", + "poQbogh malja'", + "laH", + }, + rule: { + "Rule", + }, + background: { + "mo'", + }, + scenario: { + "lut", + }, + scenarioOutline: { + "lut chovnatlh", + }, + examples: { + "ghantoH", + "lutmey", + }, + given: { + "* ", + "ghu' noblu' ", + "DaH ghu' bejlu' ", + }, + when: { + "* ", + "qaSDI' ", + }, + then: { + "* ", + "vaj ", + }, + and: { + "* ", + "'ej ", + "latlh ", + }, + but: { + "* ", + "'ach ", + "'a ", + }, + }, + map[string]messages.StepKeywordType{ + "ghu' noblu' ": messages.StepKeywordType_CONTEXT, + + "DaH ghu' bejlu' ": messages.StepKeywordType_CONTEXT, + + "qaSDI' ": messages.StepKeywordType_ACTION, + + "vaj ": messages.StepKeywordType_OUTCOME, + + "'ej ": messages.StepKeywordType_CONJUNCTION, + + "latlh ": messages.StepKeywordType_CONJUNCTION, + + "'ach ": messages.StepKeywordType_CONJUNCTION, + + "'a ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "tr": &Dialect{ + "tr", "Turkish", "Türkçe", map[string][]string{ + feature: { + "Özellik", + }, + rule: { + "Kural", + }, + background: { + "Geçmiş", + }, + scenario: { + "Örnek", + "Senaryo", + }, + scenarioOutline: { + "Senaryo taslağı", + }, + examples: { + "Örnekler", + }, + given: { + "* ", + "Diyelim ki ", + }, + when: { + "* ", + "Eğer ki ", + }, + then: { + "* ", + "O zaman ", + }, + and: { + "* ", + "Ve ", + }, + but: { + "* ", + "Fakat ", + "Ama ", + }, + }, + map[string]messages.StepKeywordType{ + "Diyelim ki ": messages.StepKeywordType_CONTEXT, + + "Eğer ki ": messages.StepKeywordType_ACTION, + + "O zaman ": messages.StepKeywordType_OUTCOME, + + "Ve ": messages.StepKeywordType_CONJUNCTION, + + "Fakat ": messages.StepKeywordType_CONJUNCTION, + + "Ama ": 
messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "tt": &Dialect{ + "tt", "Tatar", "Татарча", map[string][]string{ + feature: { + "Мөмкинлек", + "Үзенчәлеклелек", + }, + rule: { + "Rule", + }, + background: { + "Кереш", + }, + scenario: { + "Сценарий", + }, + scenarioOutline: { + "Сценарийның төзелеше", + }, + examples: { + "Үрнәкләр", + "Мисаллар", + }, + given: { + "* ", + "Әйтик ", + }, + when: { + "* ", + "Әгәр ", + }, + then: { + "* ", + "Нәтиҗәдә ", + }, + and: { + "* ", + "Һәм ", + "Вә ", + }, + but: { + "* ", + "Ләкин ", + "Әмма ", + }, + }, + map[string]messages.StepKeywordType{ + "Әйтик ": messages.StepKeywordType_CONTEXT, + + "Әгәр ": messages.StepKeywordType_ACTION, + + "Нәтиҗәдә ": messages.StepKeywordType_OUTCOME, + + "Һәм ": messages.StepKeywordType_CONJUNCTION, + + "Вә ": messages.StepKeywordType_CONJUNCTION, + + "Ләкин ": messages.StepKeywordType_CONJUNCTION, + + "Әмма ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "uk": &Dialect{ + "uk", "Ukrainian", "Українська", map[string][]string{ + feature: { + "Функціонал", + }, + rule: { + "Rule", + }, + background: { + "Передумова", + }, + scenario: { + "Приклад", + "Сценарій", + }, + scenarioOutline: { + "Структура сценарію", + }, + examples: { + "Приклади", + }, + given: { + "* ", + "Припустимо ", + "Припустимо, що ", + "Нехай ", + "Дано ", + }, + when: { + "* ", + "Якщо ", + "Коли ", + }, + then: { + "* ", + "То ", + "Тоді ", + }, + and: { + "* ", + "І ", + "А також ", + "Та ", + }, + but: { + "* ", + "Але ", + }, + }, + map[string]messages.StepKeywordType{ + "Припустимо ": messages.StepKeywordType_CONTEXT, + + "Припустимо, що ": messages.StepKeywordType_CONTEXT, + + "Нехай ": messages.StepKeywordType_CONTEXT, + + "Дано ": messages.StepKeywordType_CONTEXT, + + "Якщо ": messages.StepKeywordType_ACTION, + + "Коли ": messages.StepKeywordType_ACTION, + + "То ": messages.StepKeywordType_OUTCOME, + + "Тоді ": messages.StepKeywordType_OUTCOME, + + "І ": messages.StepKeywordType_CONJUNCTION, + + "А також ": messages.StepKeywordType_CONJUNCTION, + + "Та ": messages.StepKeywordType_CONJUNCTION, + + "Але ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "ur": &Dialect{ + "ur", "Urdu", "اردو", map[string][]string{ + feature: { + "صلاحیت", + "کاروبار کی ضرورت", + "خصوصیت", + }, + rule: { + "Rule", + }, + background: { + "پس منظر", + }, + scenario: { + "منظرنامہ", + }, + scenarioOutline: { + "منظر نامے کا خاکہ", + }, + examples: { + "مثالیں", + }, + given: { + "* ", + "اگر ", + "بالفرض ", + "فرض کیا ", + }, + when: { + "* ", + "جب ", + }, + then: { + "* ", + "پھر ", + "تب ", + }, + and: { + "* ", + "اور ", + }, + but: { + "* ", + "لیکن ", + }, + }, + map[string]messages.StepKeywordType{ + "اگر ": messages.StepKeywordType_CONTEXT, + + "بالفرض ": messages.StepKeywordType_CONTEXT, + + "فرض کیا ": messages.StepKeywordType_CONTEXT, + + "جب ": messages.StepKeywordType_ACTION, + + "پھر ": messages.StepKeywordType_OUTCOME, + + "تب ": messages.StepKeywordType_OUTCOME, + + "اور ": messages.StepKeywordType_CONJUNCTION, + + "لیکن ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "uz": &Dialect{ + "uz", "Uzbek", "Узбекча", map[string][]string{ + feature: { + "Функционал", + }, + rule: { + "Rule", + }, + background: { + "Тарих", + }, + scenario: { + "Сценарий", + }, + scenarioOutline: { + "Сценарий структураси", + }, + examples: { + "Мисоллар", + }, + given: { + "* ", + "Belgilangan ", 
+ }, + when: { + "* ", + "Агар ", + }, + then: { + "* ", + "Унда ", + }, + and: { + "* ", + "Ва ", + }, + but: { + "* ", + "Лекин ", + "Бирок ", + "Аммо ", + }, + }, + map[string]messages.StepKeywordType{ + "Belgilangan ": messages.StepKeywordType_CONTEXT, + + "Агар ": messages.StepKeywordType_ACTION, + + "Унда ": messages.StepKeywordType_OUTCOME, + + "Ва ": messages.StepKeywordType_CONJUNCTION, + + "Лекин ": messages.StepKeywordType_CONJUNCTION, + + "Бирок ": messages.StepKeywordType_CONJUNCTION, + + "Аммо ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "vi": &Dialect{ + "vi", "Vietnamese", "Tiếng Việt", map[string][]string{ + feature: { + "Tính năng", + }, + rule: { + "Rule", + }, + background: { + "Bối cảnh", + }, + scenario: { + "Tình huống", + "Kịch bản", + }, + scenarioOutline: { + "Khung tình huống", + "Khung kịch bản", + }, + examples: { + "Dữ liệu", + }, + given: { + "* ", + "Biết ", + "Cho ", + }, + when: { + "* ", + "Khi ", + }, + then: { + "* ", + "Thì ", + }, + and: { + "* ", + "Và ", + }, + but: { + "* ", + "Nhưng ", + }, + }, + map[string]messages.StepKeywordType{ + "Biết ": messages.StepKeywordType_CONTEXT, + + "Cho ": messages.StepKeywordType_CONTEXT, + + "Khi ": messages.StepKeywordType_ACTION, + + "Thì ": messages.StepKeywordType_OUTCOME, + + "Và ": messages.StepKeywordType_CONJUNCTION, + + "Nhưng ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "zh-CN": &Dialect{ + "zh-CN", "Chinese simplified", "简体中文", map[string][]string{ + feature: { + "功能", + }, + rule: { + "Rule", + "规则", + }, + background: { + "背景", + }, + scenario: { + "场景", + "剧本", + }, + scenarioOutline: { + "场景大纲", + "剧本大纲", + }, + examples: { + "例子", + }, + given: { + "* ", + "假如", + "假设", + "假定", + }, + when: { + "* ", + "当", + }, + then: { + "* ", + "那么", + }, + and: { + "* ", + "而且", + "并且", + "同时", + }, + but: { + "* ", + "但是", + }, + }, + map[string]messages.StepKeywordType{ + "假如": messages.StepKeywordType_CONTEXT, + + "假设": messages.StepKeywordType_CONTEXT, + + "假定": messages.StepKeywordType_CONTEXT, + + "当": messages.StepKeywordType_ACTION, + + "那么": messages.StepKeywordType_OUTCOME, + + "而且": messages.StepKeywordType_CONJUNCTION, + + "并且": messages.StepKeywordType_CONJUNCTION, + + "同时": messages.StepKeywordType_CONJUNCTION, + + "但是": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "zh-TW": &Dialect{ + "zh-TW", "Chinese traditional", "繁體中文", map[string][]string{ + feature: { + "功能", + }, + rule: { + "Rule", + }, + background: { + "背景", + }, + scenario: { + "場景", + "劇本", + }, + scenarioOutline: { + "場景大綱", + "劇本大綱", + }, + examples: { + "例子", + }, + given: { + "* ", + "假如", + "假設", + "假定", + }, + when: { + "* ", + "當", + }, + then: { + "* ", + "那麼", + }, + and: { + "* ", + "而且", + "並且", + "同時", + }, + but: { + "* ", + "但是", + }, + }, + map[string]messages.StepKeywordType{ + "假如": messages.StepKeywordType_CONTEXT, + + "假設": messages.StepKeywordType_CONTEXT, + + "假定": messages.StepKeywordType_CONTEXT, + + "當": messages.StepKeywordType_ACTION, + + "那麼": messages.StepKeywordType_OUTCOME, + + "而且": messages.StepKeywordType_CONJUNCTION, + + "並且": messages.StepKeywordType_CONJUNCTION, + + "同時": messages.StepKeywordType_CONJUNCTION, + + "但是": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "mr": &Dialect{ + "mr", "Marathi", "मराठी", map[string][]string{ + feature: { + "वैशिष्ट्य", + "सुविधा", + }, + rule: { + "नियम", + }, + background: { + 
"पार्श्वभूमी", + }, + scenario: { + "परिदृश्य", + }, + scenarioOutline: { + "परिदृश्य रूपरेखा", + }, + examples: { + "उदाहरण", + }, + given: { + "* ", + "जर", + "दिलेल्या प्रमाणे ", + }, + when: { + "* ", + "जेव्हा ", + }, + then: { + "* ", + "मग ", + "तेव्हा ", + }, + and: { + "* ", + "आणि ", + "तसेच ", + }, + but: { + "* ", + "पण ", + "परंतु ", + }, + }, + map[string]messages.StepKeywordType{ + "जर": messages.StepKeywordType_CONTEXT, + + "दिलेल्या प्रमाणे ": messages.StepKeywordType_CONTEXT, + + "जेव्हा ": messages.StepKeywordType_ACTION, + + "मग ": messages.StepKeywordType_OUTCOME, + + "तेव्हा ": messages.StepKeywordType_OUTCOME, + + "आणि ": messages.StepKeywordType_CONJUNCTION, + + "तसेच ": messages.StepKeywordType_CONJUNCTION, + + "पण ": messages.StepKeywordType_CONJUNCTION, + + "परंतु ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, + "amh": &Dialect{ + "amh", "Amharic", "አማርኛ", map[string][]string{ + feature: { + "ስራ", + "የተፈለገው ስራ", + "የሚፈለገው ድርጊት", + }, + rule: { + "ህግ", + }, + background: { + "ቅድመ ሁኔታ", + "መነሻ", + "መነሻ ሀሳብ", + }, + scenario: { + "ምሳሌ", + "ሁናቴ", + }, + scenarioOutline: { + "ሁናቴ ዝርዝር", + "ሁናቴ አብነት", + }, + examples: { + "ምሳሌዎች", + "ሁናቴዎች", + }, + given: { + "* ", + "የተሰጠ ", + }, + when: { + "* ", + "መቼ ", + }, + then: { + "* ", + "ከዚያ ", + }, + and: { + "* ", + "እና ", + }, + but: { + "* ", + "ግን ", + }, + }, + map[string]messages.StepKeywordType{ + "የተሰጠ ": messages.StepKeywordType_CONTEXT, + + "መቼ ": messages.StepKeywordType_ACTION, + + "ከዚያ ": messages.StepKeywordType_OUTCOME, + + "እና ": messages.StepKeywordType_CONJUNCTION, + + "ግን ": messages.StepKeywordType_CONJUNCTION, + + "* ": messages.StepKeywordType_UNKNOWN, + }}, +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go.jq b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go.jq new file mode 100644 index 000000000..72ef6170f --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/dialects_builtin.go.jq @@ -0,0 +1,110 @@ +. as $root +| ( + [ to_entries[] + | [ + "\t",(.key|@json),": &Dialect{\n", + "\t\t", (.key|@json),", ", (.value.name|@json),", ", (.value.native|@json), ", map[string][]string{\n" + ] + ( + [ .value + | {"feature","rule","background","scenario","scenarioOutline","examples","given","when","then","and","but"} + | to_entries[] + | "\t\t\t"+(.key), ": {\n", + ([ .value[] | "\t\t\t\t", @json, ",\n" ]|add), + "\t\t\t},\n" + ] + ) + [ + "\t\t},\n", + "\t\tmap[string]messages.StepKeywordType{\n" + ] + ( + [ .value.given + | ( + [ .[] | select(. != "* ") | + "\t\t\t", + @json, + ": messages.StepKeywordType_CONTEXT", + ",\n\n" + ] | add + ), + "" + ] + + + [ .value.when + | ( + [ .[] | select(. != "* ") | + "\t\t\t", + @json, + ": messages.StepKeywordType_ACTION", + ",\n\n" + ] | add + ), + "" + ] + + + [ .value.then + | ( + [ .[] | select(. != "* ") | + "\t\t\t", + @json, + ": messages.StepKeywordType_OUTCOME", + ",\n\n" + ] | add + ), + "" + ] + + + [ .value.and + | ( + [ .[] | select(. != "* ") | + "\t\t\t", + @json, + ": messages.StepKeywordType_CONJUNCTION", + ",\n\n" + ] | add + ), + "" + ] + + + [ .value.but + | ( + [ .[] | select(. 
!= "* ") | + "\t\t\t", + @json, + ": messages.StepKeywordType_CONJUNCTION", + ",\n\n" + ] | add + ), + "" + ] + + [ + "\t\t\t\"* \": messages.StepKeywordType_UNKNOWN,\n" + ] + ) + [ + "\t\t}", + "},\n" + ] + | add + ] + | add + ) +| "package gherkin\n\n" ++ "import messages \"github.com/cucumber/messages/go/v21\"\n\n" ++ "// Builtin dialects for " + ([ $root | to_entries[] | .key+" ("+.value.name+")" ] | join(", ")) + "\n" ++ "func DialectsBuiltin() DialectProvider {\n" ++ "\treturn builtinDialects\n" ++ "}\n\n" ++ "const (\n" ++ " feature = \"feature\"\n" ++ " rule = \"rule\"\n" ++ " background = \"background\"\n" ++ " scenario = \"scenario\"\n" ++ " scenarioOutline = \"scenarioOutline\"\n" ++ " examples = \"examples\"\n" ++ " given = \"given\"\n" ++ " when = \"when\"\n" ++ " then = \"then\"\n" ++ " and = \"and\"\n" ++ " but = \"but\"\n" ++ ")\n\n" ++ "var builtinDialects = gherkinDialectMap{\n" ++ . ++ "}" diff --git a/vendor/github.com/cucumber/gherkin/go/v26/gherkin.go b/vendor/github.com/cucumber/gherkin/go/v26/gherkin.go new file mode 100644 index 000000000..524d16e11 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/gherkin.go @@ -0,0 +1,143 @@ +package gherkin + +import ( + "bufio" + "fmt" + "github.com/cucumber/messages/go/v21" + "io" + "strings" +) + +type Parser interface { + StopAtFirstError(b bool) + Parse(s Scanner, m Matcher) (err error) +} + +/* +The Scanner reads a gherkin doc (typically read from a .feature file) and creates a token for +each line. The tokens are passed to the parser, which outputs an AST (Abstract Syntax Tree). + +If the scanner sees a # language header, it will reconfigure itself dynamically to look for +Gherkin keywords for the associated language. The keywords are defined in gherkin-languages.json. 
+*/ +type Scanner interface { + Scan() (line *Line, atEof bool, err error) +} + +type Builder interface { + Build(*Token) (bool, error) + StartRule(RuleType) (bool, error) + EndRule(RuleType) (bool, error) + Reset() +} + +type Token struct { + Type TokenType + Keyword string + KeywordType messages.StepKeywordType + Text string + Items []*LineSpan + GherkinDialect string + Indent string + Location *Location +} + +func (t *Token) IsEOF() bool { + return t.Type == TokenTypeEOF +} +func (t *Token) String() string { + return fmt.Sprintf("%v: %s/%s", t.Type, t.Keyword, t.Text) +} + +type LineSpan struct { + Column int + Text string +} + +func (l *LineSpan) String() string { + return fmt.Sprintf("%d:%s", l.Column, l.Text) +} + +type parser struct { + builder Builder + stopAtFirstError bool +} + +func NewParser(b Builder) Parser { + return &parser{ + builder: b, + } +} + +func (p *parser) StopAtFirstError(b bool) { + p.stopAtFirstError = b +} + +func NewScanner(r io.Reader) Scanner { + return &scanner{ + s: bufio.NewScanner(r), + line: 0, + } +} + +type scanner struct { + s *bufio.Scanner + line int +} + +func (t *scanner) Scan() (line *Line, atEof bool, err error) { + scanning := t.s.Scan() + if !scanning { + err = t.s.Err() + if err == nil { + atEof = true + } + } + if err == nil { + t.line += 1 + str := t.s.Text() + line = &Line{str, t.line, strings.TrimLeft(str, " \t"), atEof} + } + return +} + +type Line struct { + LineText string + LineNumber int + TrimmedLineText string + AtEof bool +} + +func (g *Line) Indent() int { + return len(g.LineText) - len(g.TrimmedLineText) +} + +func (g *Line) IsEmpty() bool { + return len(g.TrimmedLineText) == 0 +} + +func (g *Line) IsEof() bool { + return g.AtEof +} + +func (g *Line) StartsWith(prefix string) bool { + return strings.HasPrefix(g.TrimmedLineText, prefix) +} + +func ParseGherkinDocument(in io.Reader, newId func() string) (gherkinDocument *messages.GherkinDocument, err error) { + return ParseGherkinDocumentForLanguage(in, DefaultDialect, newId) +} + +func ParseGherkinDocumentForLanguage(in io.Reader, language string, newId func() string) (gherkinDocument *messages.GherkinDocument, err error) { + + builder := NewAstBuilder(newId) + parser := NewParser(builder) + parser.StopAtFirstError(false) + matcher := NewLanguageMatcher(DialectsBuiltin(), language) + + scanner := NewScanner(in) + + err = parser.Parse(scanner, matcher) + + return builder.GetGherkinDocument(), err +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/matcher.go b/vendor/github.com/cucumber/gherkin/go/v26/matcher.go new file mode 100644 index 000000000..fda4e6852 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/matcher.go @@ -0,0 +1,301 @@ +package gherkin + +import ( + "regexp" + "strings" + "unicode" + "unicode/utf8" +) + +const ( + DefaultDialect = "en" + CommentPrefix = "#" + TagPrefix = "@" + TitleKeywordSeparator = ":" + TableCellSeparator = '|' + EscapeChar = '\\' + EscapedNewline = 'n' + DocstringSeparator = "\"\"\"" + DocstringAlternativeSeparator = "```" +) + +type matcher struct { + gdp DialectProvider + defaultLang string + lang string + dialect *Dialect + activeDocStringSeparator string + indentToRemove int + languagePattern *regexp.Regexp +} + +func NewMatcher(gdp DialectProvider) Matcher { + return &matcher{ + gdp: gdp, + defaultLang: DefaultDialect, + lang: DefaultDialect, + dialect: gdp.GetDialect(DefaultDialect), + languagePattern: regexp.MustCompile("^\\s*#\\s*language\\s*:\\s*([a-zA-Z\\-_]+)\\s*$"), + } +} + +func NewLanguageMatcher(gdp 
DialectProvider, language string) Matcher { + return &matcher{ + gdp: gdp, + defaultLang: language, + lang: language, + dialect: gdp.GetDialect(language), + languagePattern: regexp.MustCompile("^\\s*#\\s*language\\s*:\\s*([a-zA-Z\\-_]+)\\s*$"), + } +} + +func (m *matcher) Reset() { + m.indentToRemove = 0 + m.activeDocStringSeparator = "" + if m.lang != "en" { + m.dialect = m.gdp.GetDialect(m.defaultLang) + m.lang = "en" + } +} + +func (m *matcher) newTokenAtLocation(line, index int) (token *Token) { + column := index + 1 + token = new(Token) + token.GherkinDialect = m.lang + token.Location = &Location{line, column} + return +} + +func (m *matcher) MatchEOF(line *Line) (ok bool, token *Token, err error) { + if line.IsEof() { + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeEOF + } + return +} + +func (m *matcher) MatchEmpty(line *Line) (ok bool, token *Token, err error) { + if line.IsEmpty() { + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeEmpty + } + return +} + +func (m *matcher) MatchComment(line *Line) (ok bool, token *Token, err error) { + if line.StartsWith(CommentPrefix) { + token, ok = m.newTokenAtLocation(line.LineNumber, 0), true + token.Type = TokenTypeComment + token.Text = line.LineText + } + return +} + +func (m *matcher) MatchTagLine(line *Line) (ok bool, token *Token, err error) { + if !line.StartsWith(TagPrefix) { + return + } + commentDelimiter := regexp.MustCompile(`\s+` + CommentPrefix) + uncommentedLine := commentDelimiter.Split(line.TrimmedLineText, 2)[0] + var tags []*LineSpan + var column = line.Indent() + 1 + + splits := strings.Split(uncommentedLine, TagPrefix) + for i := range splits { + txt := strings.TrimRightFunc(splits[i], func(r rune) bool { + return unicode.IsSpace(r) + }) + if len(txt) == 0 { + continue + } + if !regexp.MustCompile(`^\S+$`).MatchString(txt) { + location := &Location{line.LineNumber, column} + msg := "A tag may not contain whitespace" + err = &parseError{msg, location} + break + } + tags = append(tags, &LineSpan{column, TagPrefix + txt}) + column = column + utf8.RuneCountInString(splits[i]) + 1 + } + + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeTagLine + token.Items = tags + + return +} + +func (m *matcher) matchTitleLine(line *Line, tokenType TokenType, keywords []string) (ok bool, token *Token, err error) { + for i := range keywords { + keyword := keywords[i] + if line.StartsWith(keyword + TitleKeywordSeparator) { + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = tokenType + token.Keyword = keyword + token.Text = strings.Trim(line.TrimmedLineText[len(keyword)+1:], " ") + return + } + } + return +} + +func (m *matcher) MatchFeatureLine(line *Line) (ok bool, token *Token, err error) { + return m.matchTitleLine(line, TokenTypeFeatureLine, m.dialect.FeatureKeywords()) +} +func (m *matcher) MatchRuleLine(line *Line) (ok bool, token *Token, err error) { + return m.matchTitleLine(line, TokenTypeRuleLine, m.dialect.RuleKeywords()) +} +func (m *matcher) MatchBackgroundLine(line *Line) (ok bool, token *Token, err error) { + return m.matchTitleLine(line, TokenTypeBackgroundLine, m.dialect.BackgroundKeywords()) +} +func (m *matcher) MatchScenarioLine(line *Line) (ok bool, token *Token, err error) { + ok, token, err = m.matchTitleLine(line, TokenTypeScenarioLine, m.dialect.ScenarioKeywords()) + if ok || (err != nil) { + return ok, token, err + } + ok, token, err = 
m.matchTitleLine(line, TokenTypeScenarioLine, m.dialect.ScenarioOutlineKeywords()) + return ok, token, err +} +func (m *matcher) MatchExamplesLine(line *Line) (ok bool, token *Token, err error) { + return m.matchTitleLine(line, TokenTypeExamplesLine, m.dialect.ExamplesKeywords()) +} +func (m *matcher) MatchStepLine(line *Line) (ok bool, token *Token, err error) { + keywords := m.dialect.StepKeywords() + for i := range keywords { + keyword := keywords[i] + if line.StartsWith(keyword) { + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeStepLine + token.Keyword = keyword + token.KeywordType = m.dialect.StepKeywordType(keyword) + token.Text = strings.Trim(line.TrimmedLineText[len(keyword):], " ") + return + } + } + return +} + +func (m *matcher) MatchDocStringSeparator(line *Line) (ok bool, token *Token, err error) { + if m.activeDocStringSeparator != "" { + if line.StartsWith(m.activeDocStringSeparator) { + // close + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeDocStringSeparator + token.Keyword = m.activeDocStringSeparator + + m.indentToRemove = 0 + m.activeDocStringSeparator = "" + } + return + } + if line.StartsWith(DocstringSeparator) { + m.activeDocStringSeparator = DocstringSeparator + } else if line.StartsWith(DocstringAlternativeSeparator) { + m.activeDocStringSeparator = DocstringAlternativeSeparator + } + if m.activeDocStringSeparator != "" { + // open + mediaType := line.TrimmedLineText[len(m.activeDocStringSeparator):] + m.indentToRemove = line.Indent() + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeDocStringSeparator + token.Keyword = m.activeDocStringSeparator + token.Text = mediaType + } + return +} + +func isSpaceAndNotNewLine(r rune) bool { + return unicode.IsSpace(r) && r != '\n' +} + +func (m *matcher) MatchTableRow(line *Line) (ok bool, token *Token, err error) { + var firstChar, firstPos = utf8.DecodeRuneInString(line.TrimmedLineText) + if firstChar == TableCellSeparator { + var cells []*LineSpan + var cell []rune + var startCol = line.Indent() + 2 // column where the current cell started + // start after the first separator, it's not included in the cell + for i, w, col := firstPos, 0, startCol; i < len(line.TrimmedLineText); i += w { + var char rune + char, w = utf8.DecodeRuneInString(line.TrimmedLineText[i:]) + if char == TableCellSeparator { + // append current cell + txt := string(cell) + + txtTrimmedLeadingSpace := strings.TrimLeftFunc(txt, isSpaceAndNotNewLine) + ind := utf8.RuneCountInString(txt) - utf8.RuneCountInString(txtTrimmedLeadingSpace) + txtTrimmed := strings.TrimRightFunc(txtTrimmedLeadingSpace, isSpaceAndNotNewLine) + cells = append(cells, &LineSpan{startCol + ind, txtTrimmed}) + // start building next + cell = make([]rune, 0) + startCol = col + 1 + } else if char == EscapeChar { + // skip this character but count the column + i += w + col++ + char, w = utf8.DecodeRuneInString(line.TrimmedLineText[i:]) + if char == EscapedNewline { + cell = append(cell, '\n') + } else { + if char != TableCellSeparator && char != EscapeChar { + cell = append(cell, EscapeChar) + } + cell = append(cell, char) + } + } else { + cell = append(cell, char) + } + col++ + } + + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeTableRow + token.Items = cells + } + return +} + +func (m *matcher) MatchLanguage(line *Line) (ok bool, token *Token, err error) { + matches := 
m.languagePattern.FindStringSubmatch(line.TrimmedLineText) + if len(matches) > 0 { + lang := matches[1] + token, ok = m.newTokenAtLocation(line.LineNumber, line.Indent()), true + token.Type = TokenTypeLanguage + token.Text = lang + + dialect := m.gdp.GetDialect(lang) + if dialect == nil { + err = &parseError{"Language not supported: " + lang, token.Location} + } else { + m.lang = lang + m.dialect = dialect + } + } + return +} + +func (m *matcher) MatchOther(line *Line) (ok bool, token *Token, err error) { + token, ok = m.newTokenAtLocation(line.LineNumber, 0), true + token.Type = TokenTypeOther + + element := line.LineText + txt := strings.TrimLeft(element, " ") + + if len(element)-len(txt) > m.indentToRemove { + token.Text = m.unescapeDocString(element[m.indentToRemove:]) + } else { + token.Text = m.unescapeDocString(txt) + } + return +} + +func (m *matcher) unescapeDocString(text string) string { + if m.activeDocStringSeparator == DocstringSeparator { + return strings.Replace(text, "\\\"\\\"\\\"", DocstringSeparator, -1) + } + if m.activeDocStringSeparator == DocstringAlternativeSeparator { + return strings.Replace(text, "\\`\\`\\`", DocstringAlternativeSeparator, -1) + } + return text +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/messages.go b/vendor/github.com/cucumber/gherkin/go/v26/messages.go new file mode 100644 index 000000000..a3b7c1b71 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/messages.go @@ -0,0 +1,120 @@ +package gherkin + +import ( + "encoding/json" + "fmt" + "github.com/cucumber/messages/go/v21" + "io" + "io/ioutil" + "strings" +) + +func Messages( + paths []string, + decoder *json.Decoder, + language string, + includeSource bool, + includeGherkinDocument bool, + includePickles bool, + encoder *json.Encoder, + newId func() string, +) ([]messages.Envelope, error) { + var result []messages.Envelope + var err error + + handleMessage := func(result []messages.Envelope, message *messages.Envelope) ([]messages.Envelope, error) { + if encoder != nil { + err = encoder.Encode(message) + return result, err + } else { + result = append(result, *message) + } + + return result, err + } + + processSource := func(source *messages.Source) error { + if includeSource { + result, err = handleMessage(result, &messages.Envelope{ + Source: source, + }) + } + doc, err := ParseGherkinDocumentForLanguage(strings.NewReader(source.Data), language, newId) + if errs, ok := err.(parseErrors); ok { + // expected parse errors + for _, err := range errs { + if pe, ok := err.(*parseError); ok { + result, err = handleMessage(result, pe.asMessage(source.Uri)) + } else { + return fmt.Errorf("parse feature file: %s, unexpected error: %+v\n", source.Uri, err) + } + } + return nil + } + + if includeGherkinDocument { + doc.Uri = source.Uri + result, err = handleMessage(result, &messages.Envelope{ + GherkinDocument: doc, + }) + } + + if includePickles { + for _, pickle := range Pickles(*doc, source.Uri, newId) { + result, err = handleMessage(result, &messages.Envelope{ + Pickle: pickle, + }) + } + } + return nil + } + + if len(paths) == 0 { + for { + envelope := &messages.Envelope{} + err := decoder.Decode(envelope) + //marshal, err := json.Marshal(envelope) + //fmt.Println(string(marshal)) + if err == io.EOF { + break + } + + if envelope.Source != nil { + err = processSource(envelope.Source) + if err != nil { + return result, err + } + } + } + } else { + for _, path := range paths { + in, err := ioutil.ReadFile(path) + if err != nil { + return result, fmt.Errorf("read feature file: 
%s - %+v", path, err) + } + source := &messages.Source{ + Uri: path, + Data: string(in), + MediaType: "text/x.cucumber.gherkin+plain", + } + processSource(source) + } + } + + return result, err +} + +func (a *parseError) asMessage(uri string) *messages.Envelope { + return &messages.Envelope{ + ParseError: &messages.ParseError{ + Message: a.Error(), + Source: &messages.SourceReference{ + Uri: uri, + Location: &messages.Location{ + Line: int64(a.loc.Line), + Column: int64(a.loc.Column), + }, + }, + }, + } +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/parser.go b/vendor/github.com/cucumber/gherkin/go/v26/parser.go new file mode 100644 index 000000000..570e4babe --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/parser.go @@ -0,0 +1,4654 @@ +// +// This file is generated. Do not edit! Edit parser.go.razor instead. + +package gherkin + +import ( + "fmt" + "strings" +) + +type TokenType int + +const ( + TokenTypeNone TokenType = iota + TokenTypeEOF + TokenTypeEmpty + TokenTypeComment + TokenTypeTagLine + TokenTypeFeatureLine + TokenTypeRuleLine + TokenTypeBackgroundLine + TokenTypeScenarioLine + TokenTypeExamplesLine + TokenTypeStepLine + TokenTypeDocStringSeparator + TokenTypeTableRow + TokenTypeLanguage + TokenTypeOther +) + +func tokenTypeForRule(rt RuleType) TokenType { + return TokenTypeNone +} + +func (t TokenType) Name() string { + switch t { + case TokenTypeEOF: + return "EOF" + case TokenTypeEmpty: + return "Empty" + case TokenTypeComment: + return "Comment" + case TokenTypeTagLine: + return "TagLine" + case TokenTypeFeatureLine: + return "FeatureLine" + case TokenTypeRuleLine: + return "RuleLine" + case TokenTypeBackgroundLine: + return "BackgroundLine" + case TokenTypeScenarioLine: + return "ScenarioLine" + case TokenTypeExamplesLine: + return "ExamplesLine" + case TokenTypeStepLine: + return "StepLine" + case TokenTypeDocStringSeparator: + return "DocStringSeparator" + case TokenTypeTableRow: + return "TableRow" + case TokenTypeLanguage: + return "Language" + case TokenTypeOther: + return "Other" + } + return "" +} + +func (t TokenType) RuleType() RuleType { + switch t { + case TokenTypeEOF: + return RuleTypeEOF + case TokenTypeEmpty: + return RuleTypeEmpty + case TokenTypeComment: + return RuleTypeComment + case TokenTypeTagLine: + return RuleTypeTagLine + case TokenTypeFeatureLine: + return RuleTypeFeatureLine + case TokenTypeRuleLine: + return RuleTypeRuleLine + case TokenTypeBackgroundLine: + return RuleTypeBackgroundLine + case TokenTypeScenarioLine: + return RuleTypeScenarioLine + case TokenTypeExamplesLine: + return RuleTypeExamplesLine + case TokenTypeStepLine: + return RuleTypeStepLine + case TokenTypeDocStringSeparator: + return RuleTypeDocStringSeparator + case TokenTypeTableRow: + return RuleTypeTableRow + case TokenTypeLanguage: + return RuleTypeLanguage + case TokenTypeOther: + return RuleTypeOther + } + return RuleTypeNone +} + +type RuleType int + +const ( + RuleTypeNone RuleType = iota + + RuleTypeEOF + RuleTypeEmpty + RuleTypeComment + RuleTypeTagLine + RuleTypeFeatureLine + RuleTypeRuleLine + RuleTypeBackgroundLine + RuleTypeScenarioLine + RuleTypeExamplesLine + RuleTypeStepLine + RuleTypeDocStringSeparator + RuleTypeTableRow + RuleTypeLanguage + RuleTypeOther + RuleTypeGherkinDocument + RuleTypeFeature + RuleTypeFeatureHeader + RuleTypeRule + RuleTypeRuleHeader + RuleTypeBackground + RuleTypeScenarioDefinition + RuleTypeScenario + RuleTypeExamplesDefinition + RuleTypeExamples + RuleTypeExamplesTable + RuleTypeStep + RuleTypeStepArg + 
RuleTypeDataTable + RuleTypeDocString + RuleTypeTags + RuleTypeDescriptionHelper + RuleTypeDescription +) + +func (t RuleType) IsEOF() bool { + return t == RuleTypeEOF +} +func (t RuleType) Name() string { + switch t { + case RuleTypeEOF: + return "#EOF" + case RuleTypeEmpty: + return "#Empty" + case RuleTypeComment: + return "#Comment" + case RuleTypeTagLine: + return "#TagLine" + case RuleTypeFeatureLine: + return "#FeatureLine" + case RuleTypeRuleLine: + return "#RuleLine" + case RuleTypeBackgroundLine: + return "#BackgroundLine" + case RuleTypeScenarioLine: + return "#ScenarioLine" + case RuleTypeExamplesLine: + return "#ExamplesLine" + case RuleTypeStepLine: + return "#StepLine" + case RuleTypeDocStringSeparator: + return "#DocStringSeparator" + case RuleTypeTableRow: + return "#TableRow" + case RuleTypeLanguage: + return "#Language" + case RuleTypeOther: + return "#Other" + case RuleTypeGherkinDocument: + return "GherkinDocument" + case RuleTypeFeature: + return "Feature" + case RuleTypeFeatureHeader: + return "FeatureHeader" + case RuleTypeRule: + return "Rule" + case RuleTypeRuleHeader: + return "RuleHeader" + case RuleTypeBackground: + return "Background" + case RuleTypeScenarioDefinition: + return "ScenarioDefinition" + case RuleTypeScenario: + return "Scenario" + case RuleTypeExamplesDefinition: + return "ExamplesDefinition" + case RuleTypeExamples: + return "Examples" + case RuleTypeExamplesTable: + return "ExamplesTable" + case RuleTypeStep: + return "Step" + case RuleTypeStepArg: + return "StepArg" + case RuleTypeDataTable: + return "DataTable" + case RuleTypeDocString: + return "DocString" + case RuleTypeTags: + return "Tags" + case RuleTypeDescriptionHelper: + return "DescriptionHelper" + case RuleTypeDescription: + return "Description" + } + return "" +} + +type Location struct { + Line int + Column int +} + +type parseError struct { + msg string + loc *Location +} + +func (a *parseError) Error() string { + return fmt.Sprintf("(%d:%d): %s", a.loc.Line, a.loc.Column, a.msg) +} + +type parseErrors []error + +func (pe parseErrors) Error() string { + var ret = []string{"Parser errors:"} + for i := range pe { + ret = append(ret, pe[i].Error()) + } + return strings.Join(ret, "\n") +} + +func (p *parser) Parse(s Scanner, m Matcher) (err error) { + p.builder.Reset() + m.Reset() + ctxt := &parseContext{p, s, p.builder, m, nil, nil} + var state int + ctxt.startRule(RuleTypeGherkinDocument) + for { + gl, eof, err := ctxt.scan() + if err != nil { + ctxt.addError(err) + if p.stopAtFirstError { + break + } + } + state, err = ctxt.match(state, gl) + if err != nil { + ctxt.addError(err) + if p.stopAtFirstError { + break + } + } + if eof { + // done! 
\o/ + break + } + } + ctxt.endRule(RuleTypeGherkinDocument) + if len(ctxt.errors) > 0 { + return ctxt.errors + } + return +} + +type parseContext struct { + p *parser + s Scanner + b Builder + m Matcher + queue []*scanResult + errors parseErrors +} + +func (ctxt *parseContext) addError(e error) { + ctxt.errors = append(ctxt.errors, e) + // if (p.errors.length > 10) + // throw Errors.CompositeParserException.create(p.errors); +} + +type scanResult struct { + line *Line + atEof bool + err error +} + +func (ctxt *parseContext) scan() (*Line, bool, error) { + l := len(ctxt.queue) + if l > 0 { + x := ctxt.queue[0] + ctxt.queue = ctxt.queue[1:] + return x.line, x.atEof, x.err + } + return ctxt.s.Scan() +} + +func (ctxt *parseContext) startRule(r RuleType) (bool, error) { + ok, err := ctxt.b.StartRule(r) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + +func (ctxt *parseContext) endRule(r RuleType) (bool, error) { + ok, err := ctxt.b.EndRule(r) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + +func (ctxt *parseContext) build(t *Token) (bool, error) { + ok, err := ctxt.b.Build(t) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + +func (ctxt *parseContext) match(state int, line *Line) (newState int, err error) { + switch state { + case 0: + return ctxt.matchAt0(line) + case 1: + return ctxt.matchAt1(line) + case 2: + return ctxt.matchAt2(line) + case 3: + return ctxt.matchAt3(line) + case 4: + return ctxt.matchAt4(line) + case 5: + return ctxt.matchAt5(line) + case 6: + return ctxt.matchAt6(line) + case 7: + return ctxt.matchAt7(line) + case 8: + return ctxt.matchAt8(line) + case 9: + return ctxt.matchAt9(line) + case 10: + return ctxt.matchAt10(line) + case 11: + return ctxt.matchAt11(line) + case 12: + return ctxt.matchAt12(line) + case 13: + return ctxt.matchAt13(line) + case 14: + return ctxt.matchAt14(line) + case 15: + return ctxt.matchAt15(line) + case 16: + return ctxt.matchAt16(line) + case 17: + return ctxt.matchAt17(line) + case 18: + return ctxt.matchAt18(line) + case 19: + return ctxt.matchAt19(line) + case 20: + return ctxt.matchAt20(line) + case 21: + return ctxt.matchAt21(line) + case 22: + return ctxt.matchAt22(line) + case 23: + return ctxt.matchAt23(line) + case 24: + return ctxt.matchAt24(line) + case 25: + return ctxt.matchAt25(line) + case 26: + return ctxt.matchAt26(line) + case 27: + return ctxt.matchAt27(line) + case 28: + return ctxt.matchAt28(line) + case 29: + return ctxt.matchAt29(line) + case 30: + return ctxt.matchAt30(line) + case 31: + return ctxt.matchAt31(line) + case 32: + return ctxt.matchAt32(line) + case 33: + return ctxt.matchAt33(line) + case 34: + return ctxt.matchAt34(line) + case 35: + return ctxt.matchAt35(line) + case 36: + return ctxt.matchAt36(line) + case 37: + return ctxt.matchAt37(line) + case 38: + return ctxt.matchAt38(line) + case 39: + return ctxt.matchAt39(line) + case 40: + return ctxt.matchAt40(line) + case 41: + return ctxt.matchAt41(line) + case 43: + return ctxt.matchAt43(line) + case 44: + return ctxt.matchAt44(line) + case 45: + return ctxt.matchAt45(line) + case 46: + return ctxt.matchAt46(line) + case 47: + return ctxt.matchAt47(line) + case 48: + return ctxt.matchAt48(line) + case 49: + return ctxt.matchAt49(line) + case 50: + return ctxt.matchAt50(line) + default: + return state, fmt.Errorf("Unknown state: %+v", state) + } +} + +// Start +func (ctxt *parseContext) matchAt0(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.build(token) + 
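+ // Note: 42 is the accepting state of this generated state machine; the match dispatch above has no case for it, so no further transitions happen once the EOF token has been built.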
return 42, err + } + if ok, token, err := ctxt.matchLanguage(line); ok { + ctxt.startRule(RuleTypeFeature) + ctxt.startRule(RuleTypeFeatureHeader) + ctxt.build(token) + return 1, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.startRule(RuleTypeFeature) + ctxt.startRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 2, err + } + if ok, token, err := ctxt.matchFeatureLine(line); ok { + ctxt.startRule(RuleTypeFeature) + ctxt.startRule(RuleTypeFeatureHeader) + ctxt.build(token) + return 3, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 0, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 0, err + } + + // var stateComment = "State: 0 - Start" + var expectedTokens = []string{"#EOF", "#Language", "#TagLine", "#FeatureLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 0, err +} + +// GherkinDocument:0>Feature:0>FeatureHeader:0>#Language:0 +func (ctxt *parseContext) matchAt1(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 2, err + } + if ok, token, err := ctxt.matchFeatureLine(line); ok { + ctxt.build(token) + return 3, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 1, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 1, err + } + + // var stateComment = "State: 1 - GherkinDocument:0>Feature:0>FeatureHeader:0>#Language:0" + var expectedTokens = []string{"#TagLine", "#FeatureLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 1, err +} + +// GherkinDocument:0>Feature:0>FeatureHeader:1>Tags:0>#TagLine:0 +func (ctxt *parseContext) matchAt2(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.build(token) + return 2, err + } + if ok, token, err := ctxt.matchFeatureLine(line); ok { + ctxt.endRule(RuleTypeTags) + ctxt.build(token) + return 3, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 2, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 2, err + } + + // var stateComment = "State: 2 - GherkinDocument:0>Feature:0>FeatureHeader:1>Tags:0>#TagLine:0" + var expectedTokens = []string{"#TagLine", "#FeatureLine", "#Comment", "#Empty"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: 
fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 2, err +} + +// GherkinDocument:0>Feature:0>FeatureHeader:2>#FeatureLine:0 +func (ctxt *parseContext) matchAt3(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchEmpty(line); ok { + ctxt.build(token) + return 3, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.build(token) + return 5, err + } + if ok, token, err := ctxt.matchBackgroundLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeBackground) + ctxt.build(token) + return 6, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 11, err + } + } + if ok, token, err := ctxt.matchTagLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.startRule(RuleTypeTags) + ctxt.build(token) + return 22, err + } + if ok, token, err := ctxt.matchScenarioLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeScenarioDefinition) + ctxt.startRule(RuleTypeScenario) + ctxt.build(token) + return 12, err + } + if ok, token, err := ctxt.matchRuleLine(line); ok { + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeRule) + ctxt.startRule(RuleTypeRuleHeader) + ctxt.build(token) + return 23, err + } + if ok, token, err := ctxt.matchOther(line); ok { + ctxt.startRule(RuleTypeDescription) + ctxt.build(token) + return 4, err + } + + // var stateComment = "State: 3 - GherkinDocument:0>Feature:0>FeatureHeader:2>#FeatureLine:0" + var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return 3, err +} + +// GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:1>Description:0>#Other:0 +func (ctxt *parseContext) matchAt4(line *Line) (newState int, err error) { + if ok, token, err := ctxt.matchEOF(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.endRule(RuleTypeFeature) + ctxt.build(token) + return 42, err + } + if ok, token, err := ctxt.matchComment(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.build(token) + return 5, err + } + if ok, token, err := ctxt.matchBackgroundLine(line); ok { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeFeatureHeader) + ctxt.startRule(RuleTypeBackground) + ctxt.build(token) + return 6, err + } + if ok, token, err := ctxt.matchTagLine(line); ok { + if ctxt.lookahead0(line) { + ctxt.endRule(RuleTypeDescription) + ctxt.endRule(RuleTypeFeatureHeader) + 
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeFeatureHeader)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeFeatureHeader)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeFeatureHeader)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 4, err
+	}
+
+	// var stateComment = "State: 4 - GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:1>Description:0>#Other:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 4, err
+}
+
+// GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt5(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeFeatureHeader)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 5, err
+	}
+	if ok, token, err := ctxt.matchBackgroundLine(line); ok {
+		ctxt.endRule(RuleTypeFeatureHeader)
+		ctxt.startRule(RuleTypeBackground)
+		ctxt.build(token)
+		return 6, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeFeatureHeader)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeFeatureHeader)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeFeatureHeader)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeFeatureHeader)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 5, err
+	}
+
+	// var stateComment = "State: 5 - GherkinDocument:0>Feature:0>FeatureHeader:3>DescriptionHelper:2>#Comment:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 5, err
+}
+
+// GherkinDocument:0>Feature:1>Background:0>#BackgroundLine:0
+func (ctxt *parseContext) matchAt6(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 6, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 8, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 9, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.startRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 7, err
+	}
+
+	// var stateComment = "State: 6 - GherkinDocument:0>Feature:1>Background:0>#BackgroundLine:0"
+	var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 6, err
+}
+
+// GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt7(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 8, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 9, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 7, err
+	}
+
+	// var stateComment = "State: 7 - GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:1>Description:0>#Other:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 7, err
+}
+
+// GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt8(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 8, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 9, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 8, err
+	}
+
+	// var stateComment = "State: 8 - GherkinDocument:0>Feature:1>Background:1>DescriptionHelper:2>#Comment:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 8, err
+}
+
+// GherkinDocument:0>Feature:1>Background:2>Step:0>#StepLine:0
+func (ctxt *parseContext) matchAt9(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.startRule(RuleTypeDataTable)
+		ctxt.build(token)
+		return 10, err
+	}
+	if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+		ctxt.startRule(RuleTypeDocString)
+		ctxt.build(token)
+		return 49, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 9, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 9, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 9, err
+	}
+
+	// var stateComment = "State: 9 - GherkinDocument:0>Feature:1>Background:2>Step:0>#StepLine:0"
+	var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 9, err
+}
+
+// GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt10(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.build(token)
+		return 10, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 9, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDataTable)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 10, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 10, err
+	}
+
+	// var stateComment = "State: 10 - GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0"
+	var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 10, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:0>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt11(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.build(token)
+		return 11, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeTags)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 11, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 11, err
+	}
+
+	// var stateComment = "State: 11 - GherkinDocument:0>Feature:2>ScenarioDefinition:0>Tags:0>#TagLine:0"
+	var expectedTokens = []string{"#TagLine", "#ScenarioLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 11, err
+}
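A tag line on its own is ambiguous: the same `@tag` syntax can open a tagged Scenario or a tagged Rule, which is why matchTagLine is probed up to three times per state above, guarded by lookahead0 (and, in later states, lookahead1). The real lookahead helpers live elsewhere in this file; the fragment below is a simplified, self-contained sketch of the idea, peeking past tags, comments, and blank lines to see which keyword actually follows.

// Simplified sketch of tag-line lookahead; not the real lookahead0.
package main

import "strings"

// nextSignificantIsScenario reports whether the first line after i
// that is not a tag, comment, or blank line starts a Scenario.
func nextSignificantIsScenario(lines []string, i int) bool {
	for _, l := range lines[i+1:] {
		t := strings.TrimSpace(l)
		switch {
		case t == "", strings.HasPrefix(t, "@"), strings.HasPrefix(t, "#"):
			continue // skip tags, comments, and blank lines
		default:
			return strings.HasPrefix(t, "Scenario")
		}
	}
	return false
}

func main() {
	doc := []string{"@smoke", "# a comment", "Scenario: login"}
	_ = nextSignificantIsScenario(doc, 0) // true: the tags belong to the scenario
}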
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0
+func (ctxt *parseContext) matchAt12(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 14, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 15, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 17, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.startRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 13, err
+	}
+
+	// var stateComment = "State: 12 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0"
+	var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 12, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt13(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 14, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 15, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 17, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 13, err
+	}
+
+	// var stateComment = "State: 13 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 13, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt14(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 14, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 15, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 17, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 14, err
+	}
+
+	// var stateComment = "State: 14 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 14, err
+}
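Every handler falls through to the same error template: the state's expected-token list joined with ", ", a location whose column is 1-based and derived from the line's indentation (or 0 at EOF), and the unchanged state as the return value so parsing can continue past the bad line. A minimal standalone sketch of that message format, with parseError and Location reduced to local stand-ins:

// Standalone sketch of the fall-through error format used above.
package main

import (
	"fmt"
	"strings"
)

type location struct{ Line, Column int }

type parseError struct {
	msg string
	loc location
}

func (e *parseError) Error() string {
	return fmt.Sprintf("(%d:%d): %s", e.loc.Line, e.loc.Column, e.msg)
}

func main() {
	expected := []string{"#EOF", "#TableRow", "#StepLine"}
	err := &parseError{
		msg: fmt.Sprintf("expected: %s, got 'Examples:'", strings.Join(expected, ", ")),
		loc: location{Line: 12, Column: 5},
	}
	fmt.Println(err) // (12:5): expected: #EOF, #TableRow, #StepLine, got 'Examples:'
}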
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0
+func (ctxt *parseContext) matchAt15(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.startRule(RuleTypeDataTable)
+		ctxt.build(token)
+		return 16, err
+	}
+	if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+		ctxt.startRule(RuleTypeDocString)
+		ctxt.build(token)
+		return 47, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 15, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeStep)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 17, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 15, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 15, err
+	}
+
+	// var stateComment = "State: 15 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0"
+	var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 15, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt16(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.build(token)
+		return 16, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 15, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeDataTable)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 17, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDataTable)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 16, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 16, err
+	}
+
+	// var stateComment = "State: 16 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0"
+	var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 16, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt17(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.build(token)
+		return 17, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeTags)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 17, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 17, err
+	}
+
+	// var stateComment = "State: 17 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0"
+	var expectedTokens = []string{"#TagLine", "#ExamplesLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 17, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0
+func (ctxt *parseContext) matchAt18(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 20, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.startRule(RuleTypeExamplesTable)
+		ctxt.build(token)
+		return 21, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 17, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.startRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 19, err
+	}
+
+	// var stateComment = "State: 18 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0"
+	var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 18, err
+}
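To make the numeric states concrete, here is an assumed trace (read off the transitions above, not produced by running the parser) for a small Scenario Outline: the scenario line lands in state 12, each step in 15, the Examples keyword in 18, and every table row loops through 21.

// Hypothetical state trace for a small outline, per the handlers above.
package main

const feature = `Scenario Outline: add   -> 12
  Given <a> and <b>         -> 15
  Examples:                 -> 18
    | a | b |               -> 21
    | 1 | 2 |               -> 21
`

func main() { _ = feature }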
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt19(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 20, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.startRule(RuleTypeExamplesTable)
+		ctxt.build(token)
+		return 21, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 17, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 19, err
+	}
+
+	// var stateComment = "State: 19 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 19, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt20(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 20, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.startRule(RuleTypeExamplesTable)
+		ctxt.build(token)
+		return 21, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 17, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 20, err
+	}
+
+	// var stateComment = "State: 20 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 20, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt21(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeExamplesTable)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.build(token)
+		return 21, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeExamplesTable)
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 17, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeExamplesTable)
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeExamplesTable)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeExamplesTable)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeExamplesTable)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeExamplesTable)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 21, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 21, err
+	}
+
+	// var stateComment = "State: 21 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0"
+	var expectedTokens = []string{"#EOF", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 21, err
+}
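Note that the handlers never construct AST nodes themselves: startRule/endRule bracket each build(token) into a well-nested stream of open/close events, and a separate builder (defined elsewhere in the package) folds that stream into the document tree. The toy builder below shows the shape of that contract; the interface and names are illustrative, not the library's.

// Toy builder consuming the open/close/token event stream.
package main

import "fmt"

type ruleType string

type builder struct{ depth int }

func (b *builder) StartRule(r ruleType) { fmt.Printf("%*sopen %s\n", b.depth*2, "", r); b.depth++ }
func (b *builder) EndRule(r ruleType)   { b.depth--; fmt.Printf("%*sclose %s\n", b.depth*2, "", r) }
func (b *builder) Build(token string)   { fmt.Printf("%*stoken %q\n", b.depth*2, "", token) }

func main() {
	var b builder
	b.StartRule("ScenarioDefinition")
	b.StartRule("Scenario")
	b.Build("Scenario: login")
	b.EndRule("Scenario")
	b.EndRule("ScenarioDefinition")
}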
+
+// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:0>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt22(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeTags)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 22, err
+	}
+
+	// var stateComment = "State: 22 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:0>Tags:0>#TagLine:0"
+	var expectedTokens = []string{"#TagLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 22, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:1>#RuleLine:0
+func (ctxt *parseContext) matchAt23(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 25, err
+	}
+	if ok, token, err := ctxt.matchBackgroundLine(line); ok {
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeBackground)
+		ctxt.build(token)
+		return 26, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeRuleHeader)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.startRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 24, err
+	}
+
+	// var stateComment = "State: 23 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:1>#RuleLine:0"
+	var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 23, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt24(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 25, err
+	}
+	if ok, token, err := ctxt.matchBackgroundLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeBackground)
+		ctxt.build(token)
+		return 26, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.endRule(RuleTypeRuleHeader)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 24, err
+	}
+
+	// var stateComment = "State: 24 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:1>Description:0>#Other:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 24, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt25(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 25, err
+	}
+	if ok, token, err := ctxt.matchBackgroundLine(line); ok {
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeBackground)
+		ctxt.build(token)
+		return 26, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeRuleHeader)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeRuleHeader)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 25, err
+	}
+
+	// var stateComment = "State: 25 - GherkinDocument:0>Feature:3>Rule:0>RuleHeader:2>DescriptionHelper:2>#Comment:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#BackgroundLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 25, err
+}
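States 26-30 below replay the Background machinery of states 6-10, scoped inside a Rule: the endRule calls additionally close the Rule, tag lookahead targets state 31 instead of 11, scenario lines go to 32 instead of 12, and doc strings to 45 instead of 49. A small map of those mirrored pairs, assembled by reading the transitions in this diff:

// Feature-level Background states and their Rule-scoped twins.
package main

var ruleScopedTwin = map[int]int{
	6:  26, // Background line
	7:  27, // Background description
	8:  28, // Background description comment
	9:  29, // Background step
	10: 30, // Background step data table
}

func main() { _ = ruleScopedTwin }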
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:0>#BackgroundLine:0
+func (ctxt *parseContext) matchAt26(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 26, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 28, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 29, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.startRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 27, err
+	}
+
+	// var stateComment = "State: 26 - GherkinDocument:0>Feature:3>Rule:1>Background:0>#BackgroundLine:0"
+	var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 26, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt27(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 28, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 29, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 27, err
+	}
+
+	// var stateComment = "State: 27 - GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:1>Description:0>#Other:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 27, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt28(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 28, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 29, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 28, err
+	}
+
+	// var stateComment = "State: 28 - GherkinDocument:0>Feature:3>Rule:1>Background:1>DescriptionHelper:2>#Comment:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 28, err
+}
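All of these states share one fallthrough error shape: when no matcher accepts the line, the state builds a parseError listing exactly the token types that were legal there, pointing at column 0 for an unexpected EOF or just past the line's indentation otherwise. A rough, self-contained illustration of that convention (simplified stand-ins for the vendored types; the exact Error() format here is an assumption, not the library's):

	// Simplified stand-ins; not the vendored declarations.
	type Location struct{ Line, Column int }

	type parseError struct {
		msg string
		loc *Location
	}

	// Error renders location plus message; the concrete format is assumed.
	func (e *parseError) Error() string {
		return fmt.Sprintf("(%d:%d): %s", e.loc.Line, e.loc.Column, e.msg)
	}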
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:0>#StepLine:0
+func (ctxt *parseContext) matchAt29(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.startRule(RuleTypeDataTable)
+		ctxt.build(token)
+		return 30, err
+	}
+	if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+		ctxt.startRule(RuleTypeDocString)
+		ctxt.build(token)
+		return 45, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 29, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 29, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 29, err
+	}
+
+	// var stateComment = "State: 29 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:0>#StepLine:0"
+	var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 29, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt30(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.build(token)
+		return 30, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 29, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDataTable)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 30, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 30, err
+	}
+
+	// var stateComment = "State: 30 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0"
+	var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 30, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:0>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt31(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.build(token)
+		return 31, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeTags)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 31, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 31, err
+	}
+
+	// var stateComment = "State: 31 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:0>Tags:0>#TagLine:0"
+	var expectedTokens = []string{"#TagLine", "#ScenarioLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 31, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0
+func (ctxt *parseContext) matchAt32(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 34, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 35, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 37, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.startRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 33, err
+	}
+
+	// var stateComment = "State: 32 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:0>#ScenarioLine:0"
+	var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 32, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt33(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 34, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 35, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 37, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 33, err
+	}
+
+	// var stateComment = "State: 33 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:1>Description:0>#Other:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 33, err
+}
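The three consecutive matchTagLine branches in the states above are how a bare tag line gets disambiguated: lookahead1 peeks ahead for an #ExamplesLine, lookahead0 peeks for a #ScenarioLine, and only when neither lookahead succeeds does the tag fall through to opening a new Rule. A hypothetical input that exercises all three readings (illustrative Go raw string, not test data from this repository):

	// Hypothetical feature fragment; comments mark which branch claims each tag.
	var tagAmbiguity = `
	  @wip
	  Examples:        # lookahead1 hit: tags bind to the Examples block
	    | n |

	  @wip
	  Scenario: next   # lookahead0 hit: tags open the next scenario

	  @wip
	  Rule: cleanup    # neither hit: tags open a new Rule
	`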
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt34(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 34, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 35, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 37, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 34, err
+	}
+
+	// var stateComment = "State: 34 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:1>DescriptionHelper:2>#Comment:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 34, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0
+func (ctxt *parseContext) matchAt35(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.startRule(RuleTypeDataTable)
+		ctxt.build(token)
+		return 36, err
+	}
+	if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+		ctxt.startRule(RuleTypeDocString)
+		ctxt.build(token)
+		return 43, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 35, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeStep)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 37, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 35, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 35, err
+	}
+
+	// var stateComment = "State: 35 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:0>#StepLine:0"
+	var expectedTokens = []string{"#EOF", "#TableRow", "#DocStringSeparator", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 35, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt36(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.build(token)
+		return 36, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 35, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeDataTable)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 37, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDataTable)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDataTable)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 36, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 36, err
+	}
+
+	// var stateComment = "State: 36 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:0>DataTable:0>#TableRow:0"
+	var expectedTokens = []string{"#EOF", "#TableRow", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 36, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0
+func (ctxt *parseContext) matchAt37(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.build(token)
+		return 37, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeTags)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 37, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 37, err
+	}
+
+	// var stateComment = "State: 37 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:0>Tags:0>#TagLine:0"
+	var expectedTokens = []string{"#TagLine", "#ExamplesLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 37, err
+}
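States 30 and 36 above (and 41 below) loop on #TableRow, calling ctxt.build(token) once per row while staying in the same state, so tables accumulate purely through repeated build events rather than any dedicated table state transitions. A toy accumulator showing that pattern (types and the cell representation are assumptions; the vendored AST builder is richer):

	// Toy accumulator for #TableRow build events; types are assumed.
	type tableBuilder struct {
		rows [][]string
	}

	func (b *tableBuilder) build(cells []string) {
		// Each #TableRow token contributes one row; the state id is unchanged.
		b.rows = append(b.rows, cells)
	}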
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0
+func (ctxt *parseContext) matchAt38(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 40, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.startRule(RuleTypeExamplesTable)
+		ctxt.build(token)
+		return 41, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 37, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.startRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 39, err
+	}
+
+	// var stateComment = "State: 38 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:0>#ExamplesLine:0"
+	var expectedTokens = []string{"#EOF", "#Empty", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 38, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0
+func (ctxt *parseContext) matchAt39(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.build(token)
+		return 40, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.startRule(RuleTypeExamplesTable)
+		ctxt.build(token)
+		return 41, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 37, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDescription)
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDescription)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 39, err
+	}
+
+	// var stateComment = "State: 39 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:1>Description:0>#Other:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 39, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0
+func (ctxt *parseContext) matchAt40(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 40, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.startRule(RuleTypeExamplesTable)
+		ctxt.build(token)
+		return 41, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 37, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 40, err
+	}
+
+	// var stateComment = "State: 40 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:1>DescriptionHelper:2>#Comment:0"
+	var expectedTokens = []string{"#EOF", "#Comment", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 40, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0
+func (ctxt *parseContext) matchAt41(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeExamplesTable)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchTableRow(line); ok {
+		ctxt.build(token)
+		return 41, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeExamplesTable)
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 37, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeExamplesTable)
+			ctxt.endRule(RuleTypeExamples)
+			ctxt.endRule(RuleTypeExamplesDefinition)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeExamplesTable)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeExamplesTable)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeExamplesTable)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeExamplesTable)
+		ctxt.endRule(RuleTypeExamples)
+		ctxt.endRule(RuleTypeExamplesDefinition)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 41, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 41, err
+	}
+
+	// var stateComment = "State: 41 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:3>ExamplesDefinition:1>Examples:2>ExamplesTable:0>#TableRow:0"
+	var expectedTokens = []string{"#EOF", "#TableRow", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 41, err
+}
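Everything from state 43 onward handles DocString step arguments in their different contexts (Rule-level scenario, Rule-level background, feature-level scenario, feature-level background). Each context gets an "open" state that accepts only #Other content lines or the closing #DocStringSeparator, and a "closed" state that routes back into the surrounding scenario or background. A hypothetical input for orientation (illustrative only):

	// Hypothetical step carrying a DocString: the opening """ enters an
	// open state (43/45/47/49); the closing """ moves to its closed
	// counterpart (44/46/48/50).
	var docStringStep = "Given the request body:\n" +
		"  \"\"\"\n" +
		"  {\"id\": 1}\n" +
		"  \"\"\"\n"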
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt43(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+		ctxt.build(token)
+		return 44, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 43, err
+	}
+
+	// var stateComment = "State: 43 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0"
+	var expectedTokens = []string{"#DocStringSeparator", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 43, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt44(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 35, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeDocString)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 37, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDocString)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 38, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 44, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 44, err
+	}
+
+	// var stateComment = "State: 44 - GherkinDocument:0>Feature:3>Rule:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0"
+	var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 44, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt45(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+		ctxt.build(token)
+		return 46, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 45, err
+	}
+
+	// var stateComment = "State: 45 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0"
+	var expectedTokens = []string{"#DocStringSeparator", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 45, err
+}
+
+// GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt46(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 29, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDocString)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 31, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 32, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 46, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 46, err
+	}
+
+	// var stateComment = "State: 46 - GherkinDocument:0>Feature:3>Rule:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0"
+	var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 46, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt47(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+		ctxt.build(token)
+		return 48, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 47, err
+	}
+
+	// var stateComment = "State: 47 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0"
+	var expectedTokens = []string{"#DocStringSeparator", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 47, err
+}
+
+// GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt48(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 15, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead1(line) {
+			ctxt.endRule(RuleTypeDocString)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.startRule(RuleTypeExamplesDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 17, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDocString)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeScenario)
+			ctxt.endRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchExamplesLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeExamplesDefinition)
+		ctxt.startRule(RuleTypeExamples)
+		ctxt.build(token)
+		return 18, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeScenario)
+		ctxt.endRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 48, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 48, err
+	}
+
+	// var stateComment = "State: 48 - GherkinDocument:0>Feature:2>ScenarioDefinition:1>Scenario:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0"
+	var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ExamplesLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 48, err
+}
+
+// GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt49(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchDocStringSeparator(line); ok {
+		ctxt.build(token)
+		return 50, err
+	}
+	if ok, token, err := ctxt.matchOther(line); ok {
+		ctxt.build(token)
+		return 49, err
+	}
+
+	// var stateComment = "State: 49 - GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:0>#DocStringSeparator:0"
+	var expectedTokens = []string{"#DocStringSeparator", "#Other"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 49, err
+}
+
+// GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0
+func (ctxt *parseContext) matchAt50(line *Line) (newState int, err error) {
+	if ok, token, err := ctxt.matchEOF(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.endRule(RuleTypeFeature)
+		ctxt.build(token)
+		return 42, err
+	}
+	if ok, token, err := ctxt.matchStepLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.startRule(RuleTypeStep)
+		ctxt.build(token)
+		return 9, err
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		if ctxt.lookahead0(line) {
+			ctxt.endRule(RuleTypeDocString)
+			ctxt.endRule(RuleTypeStep)
+			ctxt.endRule(RuleTypeBackground)
+			ctxt.startRule(RuleTypeScenarioDefinition)
+			ctxt.startRule(RuleTypeTags)
+			ctxt.build(token)
+			return 11, err
+		}
+	}
+	if ok, token, err := ctxt.matchTagLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.startRule(RuleTypeTags)
+		ctxt.build(token)
+		return 22, err
+	}
+	if ok, token, err := ctxt.matchScenarioLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeScenarioDefinition)
+		ctxt.startRule(RuleTypeScenario)
+		ctxt.build(token)
+		return 12, err
+	}
+	if ok, token, err := ctxt.matchRuleLine(line); ok {
+		ctxt.endRule(RuleTypeDocString)
+		ctxt.endRule(RuleTypeStep)
+		ctxt.endRule(RuleTypeBackground)
+		ctxt.startRule(RuleTypeRule)
+		ctxt.startRule(RuleTypeRuleHeader)
+		ctxt.build(token)
+		return 23, err
+	}
+	if ok, token, err := ctxt.matchComment(line); ok {
+		ctxt.build(token)
+		return 50, err
+	}
+	if ok, token, err := ctxt.matchEmpty(line); ok {
+		ctxt.build(token)
+		return 50, err
+	}
+
+	// var stateComment = "State: 50 - GherkinDocument:0>Feature:1>Background:2>Step:1>StepArg:0>__alt0:1>DocString:2>#DocStringSeparator:0"
+	var expectedTokens = []string{"#EOF", "#StepLine", "#TagLine", "#ScenarioLine", "#RuleLine", "#Comment", "#Empty"}
+	if line.IsEof() {
+		err = &parseError{
+			msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens, ", ")),
+			loc: &Location{Line: line.LineNumber, Column: 0},
+		}
+	} else {
+		err = &parseError{
+			msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens, ", "), line.LineText),
+			loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1},
+		}
+	}
+	// if (ctxt.p.stopAtFirstError) throw error;
+	//ctxt.addError(err)
+	return 50, err
+}
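All token probes funnel through the Matcher interface that follows; the parseContext wrappers add a single EOF guard so individual matchers never see end-of-input (except MatchEOF itself). That shape also makes the interface straightforward to fake in tests; a sketch, assuming a hypothetical defaultMatcher with no-op implementations to embed (not part of this package):

	// Sketch of a test double; defaultMatcher is a hypothetical embeddable
	// type whose Match* methods all return (false, nil, nil).
	type eofOnlyMatcher struct{ defaultMatcher }

	func (m eofOnlyMatcher) MatchEOF(line *Line) (bool, *Token, error) {
		// Only end-of-input produces a token in this double.
		return line.IsEof(), &Token{}, nil
	}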
ctxt.m.MatchFeatureLine(line) +} + +func (ctxt *parseContext) isMatchRuleLine(line *Line) bool { + ok, _, _ := ctxt.matchRuleLine(line) + return ok +} +func (ctxt *parseContext) matchRuleLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchRuleLine(line) +} + +func (ctxt *parseContext) isMatchBackgroundLine(line *Line) bool { + ok, _, _ := ctxt.matchBackgroundLine(line) + return ok +} +func (ctxt *parseContext) matchBackgroundLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchBackgroundLine(line) +} + +func (ctxt *parseContext) isMatchScenarioLine(line *Line) bool { + ok, _, _ := ctxt.matchScenarioLine(line) + return ok +} +func (ctxt *parseContext) matchScenarioLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchScenarioLine(line) +} + +func (ctxt *parseContext) isMatchExamplesLine(line *Line) bool { + ok, _, _ := ctxt.matchExamplesLine(line) + return ok +} +func (ctxt *parseContext) matchExamplesLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchExamplesLine(line) +} + +func (ctxt *parseContext) isMatchStepLine(line *Line) bool { + ok, _, _ := ctxt.matchStepLine(line) + return ok +} +func (ctxt *parseContext) matchStepLine(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchStepLine(line) +} + +func (ctxt *parseContext) isMatchDocStringSeparator(line *Line) bool { + ok, _, _ := ctxt.matchDocStringSeparator(line) + return ok +} +func (ctxt *parseContext) matchDocStringSeparator(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchDocStringSeparator(line) +} + +func (ctxt *parseContext) isMatchTableRow(line *Line) bool { + ok, _, _ := ctxt.matchTableRow(line) + return ok +} +func (ctxt *parseContext) matchTableRow(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchTableRow(line) +} + +func (ctxt *parseContext) isMatchLanguage(line *Line) bool { + ok, _, _ := ctxt.matchLanguage(line) + return ok +} +func (ctxt *parseContext) matchLanguage(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchLanguage(line) +} + +func (ctxt *parseContext) isMatchOther(line *Line) bool { + ok, _, _ := ctxt.matchOther(line) + return ok +} +func (ctxt *parseContext) matchOther(line *Line) (bool, *Token, error) { + if line.IsEof() { + return false, nil, nil + } + return ctxt.m.MatchOther(line) +} + +func (ctxt *parseContext) lookahead0(initialLine *Line) bool { + var queue []*scanResult + var match bool + + for { + line, atEof, err := ctxt.scan() + queue = append(queue, &scanResult{line, atEof, err}) + + if false || ctxt.isMatchScenarioLine(line) { + match = true + break + } + if !(false || ctxt.isMatchEmpty(line) || ctxt.isMatchComment(line) || ctxt.isMatchTagLine(line)) { + break + } + if atEof { + break + } + } + + ctxt.queue = append(ctxt.queue, queue...) 
+ + return match +} + +func (ctxt *parseContext) lookahead1(initialLine *Line) bool { + var queue []*scanResult + var match bool + + for { + line, atEof, err := ctxt.scan() + queue = append(queue, &scanResult{line, atEof, err}) + + if false || ctxt.isMatchExamplesLine(line) { + match = true + break + } + if !(false || ctxt.isMatchEmpty(line) || ctxt.isMatchComment(line) || ctxt.isMatchTagLine(line)) { + break + } + if atEof { + break + } + } + + ctxt.queue = append(ctxt.queue, queue...) + + return match +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/parser.go.razor b/vendor/github.com/cucumber/gherkin/go/v26/parser.go.razor new file mode 100644 index 000000000..7b173db1f --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/parser.go.razor @@ -0,0 +1,309 @@ +@using Berp; +@helper CallProduction(ProductionRule production) +{ + switch(production.Type) + { + case ProductionRuleType.Start: + @:ctxt.startRule(@Raw("RuleType" + production.RuleName.Replace("#", ""))); + break; + case ProductionRuleType.End: + @:ctxt.endRule(@Raw("RuleType" + production.RuleName.Replace("#", ""))); + break; + case ProductionRuleType.Process: + @:ctxt.build(token); + break; + } +} +@helper HandleParserError(IEnumerable<string> expectedTokens, State state) +{ + // var stateComment = "State: @state.Id - @Raw(state.Comment)" + var expectedTokens = []string{"@Raw(string.Join("\", \"", expectedTokens))"} + if line.IsEof() { + err = &parseError{ + msg: fmt.Sprintf("unexpected end of file, expected: %s", strings.Join(expectedTokens,", ")), + loc: &Location{Line: line.LineNumber, Column: 0}, + } + } else { + err = &parseError{ + msg: fmt.Sprintf("expected: %s, got '%s'", strings.Join(expectedTokens,", "), line.LineText), + loc: &Location{Line: line.LineNumber, Column: line.Indent() + 1}, + } + } + // if (ctxt.p.stopAtFirstError) throw error; + //ctxt.addError(err) + return @state.Id, err} +@helper MatchToken(TokenType tokenType) +{ctxt.match@(tokenType)(line)} +@helper IsMatchToken(TokenType tokenType) +{ctxt.isMatch@(tokenType)(line)} +@helper TokenConst(Rule rule) +{@Raw("rule" + rule.Name.Replace("#", "Int"))} +// +// This file is generated. Do not edit! Edit parser.go.razor instead.
+ +package gherkin + +import ( + "fmt" + "strings" +) + +type TokenType int + +const ( + TokenTypeNone TokenType = iota + @foreach(var rule in Model.RuleSet.TokenRules) + { @Raw("TokenType" + rule.Name.Replace("#", "")) +} +) + +func tokenTypeForRule(rt RuleType) TokenType { + return TokenTypeNone +} + +func (t TokenType) Name() string { + switch t { + @foreach(var rule in Model.RuleSet.TokenRules) + { case @Raw("TokenType" + rule.Name.Replace("#", "")): return "@Raw(rule.Name.Replace("#", ""))" +} + } + return "" +} + +func (t TokenType) RuleType() RuleType { + switch t { + @foreach(var rule in Model.RuleSet.TokenRules) + { case @Raw("TokenType" + rule.Name.Replace("#", "")): return @Raw("RuleType" + rule.Name.Replace("#", "")) +} + } + return RuleTypeNone +} + + +type RuleType int + +const ( + RuleTypeNone RuleType = iota + + @foreach(var rule in Model.RuleSet.Where(r => !r.TempRule)) + { @Raw("RuleType" + rule.Name.Replace("#", "")) +} +) + +func (t RuleType) IsEOF() bool { + return t == RuleTypeEOF +} +func (t RuleType) Name() string { + switch t { + @foreach(var rule in Model.RuleSet.Where(r => !r.TempRule)) + { case @Raw("RuleType" + rule.Name.Replace("#", "")): return "@Raw(rule.Name)" +} + } + return "" +} + +type Location struct { + Line int + Column int +} + +type parseError struct { + msg string + loc *Location +} + +func (a *parseError) Error() string { + return fmt.Sprintf("(%d:%d): %s", a.loc.Line, a.loc.Column, a.msg) +} + +type parseErrors []error +func (pe parseErrors) Error() string { + var ret = []string{"Parser errors:"} + for i := range pe { + ret = append(ret, pe[i].Error()) + } + return strings.Join(ret,"\n") +} + +func (p *parser) Parse(s Scanner, m Matcher) (err error) { + p.builder.Reset() + m.Reset() + ctxt := &parseContext{p,s,p.builder,m,nil,nil} + var state int + ctxt.startRule(@Raw("RuleType" + @Model.RuleSet.StartRule.Name)) + for { + gl, eof, err := ctxt.scan() + if err != nil { + ctxt.addError(err) + if p.stopAtFirstError { + break + } + } + state, err = ctxt.match(state, gl) + if err != nil { + ctxt.addError(err) + if p.stopAtFirstError { + break + } + } + if eof { + // done! 
\o/ + break + } + } + ctxt.endRule(@Raw("RuleType" + @Model.RuleSet.StartRule.Name)) + if len(ctxt.errors) > 0 { + return ctxt.errors + } + return +} + +type parseContext struct { + p *parser + s Scanner + b Builder + m Matcher + queue []*scanResult + errors parseErrors +} + +func (ctxt *parseContext) addError(e error) { + ctxt.errors = append(ctxt.errors, e); + // if (p.errors.length > 10) + // throw Errors.CompositeParserException.create(p.errors); +} + +type scanResult struct { + line *Line + atEof bool + err error +} +func (ctxt *parseContext) scan() (*Line, bool, error) { + l := len(ctxt.queue) + if l > 0 { + x := ctxt.queue[0] + ctxt.queue = ctxt.queue[1:] + return x.line, x.atEof, x.err + } + return ctxt.s.Scan() +} + +func (ctxt *parseContext) startRule(r RuleType) (bool, error) { + ok, err := ctxt.b.StartRule(r) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + +func (ctxt *parseContext) endRule(r RuleType) (bool, error) { + ok, err := ctxt.b.EndRule(r) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + +func (ctxt *parseContext) build(t *Token) (bool, error) { + ok, err := ctxt.b.Build(t) + if err != nil { + ctxt.addError(err) + } + return ok, err +} + + +func (ctxt *parseContext) match(state int, line *Line) (newState int, err error) { + switch(state) { + @foreach(var state in Model.States.Values.Where(s => !s.IsEndState)) + { + @:case @state.Id: + @:return ctxt.matchAt@(state.Id)(line); + } + default: + return state, fmt.Errorf("Unknown state: %+v", state); + } +} + +@foreach(var state in Model.States.Values.Where(s => !s.IsEndState)) +{ + + // @Raw(state.Comment) +func (ctxt *parseContext) matchAt@(state.Id)(line *Line) (newState int, err error) { + @foreach(var transition in state.Transitions) + { + @:if ok, token, err := @MatchToken(transition.TokenType); ok { + if (transition.LookAheadHint != null) + { + @:if ctxt.lookahead@(transition.LookAheadHint.Id)(line) { + } + foreach(var production in transition.Productions) + { + @CallProduction(production) + } + @:return @transition.TargetState, err; + if (transition.LookAheadHint != null) + { + @:} + } + @:} + } + @HandleParserError(state.Transitions.Select(t => "#" + t.TokenType.ToString()).Distinct(), state) +} + +} + +type Matcher interface { + @foreach(var rule in Model.RuleSet.TokenRules) + { Match@(rule.Name.Replace("#", ""))(line *Line) (bool,*Token,error) +} + Reset() +} +@foreach(var rule in Model.RuleSet.TokenRules) +{ + +func (ctxt *parseContext) isMatch@(rule.Name.Replace("#", ""))(line *Line) bool { + ok, _, _ := ctxt.match@(rule.Name.Replace("#", ""))(line) + return ok +} +func (ctxt *parseContext) match@(rule.Name.Replace("#", ""))(line *Line) (bool, *Token, error) { + @if (rule.Name != "#EOF") + { + @:if line.IsEof() { + @: return false, nil, nil + @:} + } + return ctxt.m.Match@(rule.Name.Replace("#", ""))(line); +} + +} + +@foreach(var lookAheadHint in Model.RuleSet.LookAheadHints) +{ + +func (ctxt *parseContext) lookahead@(lookAheadHint.Id)(initialLine *Line) bool { + var queue []*scanResult + var match bool + + for { + line, atEof, err := ctxt.scan(); + queue = append(queue, &scanResult{line,atEof,err}); + + if false @foreach(var tokenType in lookAheadHint.ExpectedTokens) { || @IsMatchToken(tokenType)} { + match = true; + break + } + if !(false @foreach(var tokenType in lookAheadHint.Skip) { || @IsMatchToken(tokenType)}) { + break + } + if atEof { + break + } + } + + ctxt.queue = append(ctxt.queue, queue...) 
+ + return match; + } + +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/pickles.go b/vendor/github.com/cucumber/gherkin/go/v26/pickles.go new file mode 100644 index 000000000..ad3fa84d2 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/pickles.go @@ -0,0 +1,266 @@ +package gherkin + +import ( + "github.com/cucumber/messages/go/v21" + "strings" +) + +func Pickles(gherkinDocument messages.GherkinDocument, uri string, newId func() string) []*messages.Pickle { + pickles := make([]*messages.Pickle, 0) + if gherkinDocument.Feature == nil { + return pickles + } + language := gherkinDocument.Feature.Language + + pickles = compileFeature(pickles, *gherkinDocument.Feature, uri, language, newId) + return pickles +} + +func compileFeature(pickles []*messages.Pickle, feature messages.Feature, uri string, language string, newId func() string) []*messages.Pickle { + featureBackgroundSteps := make([]*messages.Step, 0) + featureTags := feature.Tags + for _, child := range feature.Children { + if child.Background != nil { + featureBackgroundSteps = append(featureBackgroundSteps, child.Background.Steps...) + } + if child.Rule != nil { + pickles = compileRule(pickles, child.Rule, featureTags, featureBackgroundSteps, uri, language, newId) + } + if child.Scenario != nil { + if len(child.Scenario.Examples) == 0 { + pickles = compileScenario(pickles, featureBackgroundSteps, child.Scenario, featureTags, uri, language, newId) + } else { + pickles = compileScenarioOutline(pickles, child.Scenario, featureTags, featureBackgroundSteps, uri, language, newId) + } + } + } + return pickles +} + +func compileRule( + pickles []*messages.Pickle, + rule *messages.Rule, + featureTags []*messages.Tag, + featureBackgroundSteps []*messages.Step, + uri string, + language string, + newId func() string, +) []*messages.Pickle { + ruleBackgroundSteps := make([]*messages.Step, 0) + ruleBackgroundSteps = append(ruleBackgroundSteps, featureBackgroundSteps...) + tags := append(featureTags, rule.Tags...) + + for _, child := range rule.Children { + if child.Background != nil { + ruleBackgroundSteps = append(ruleBackgroundSteps, child.Background.Steps...) 
+ } + if child.Scenario != nil { + if len(child.Scenario.Examples) == 0 { + pickles = compileScenario(pickles, ruleBackgroundSteps, child.Scenario, tags, uri, language, newId) + } else { + pickles = compileScenarioOutline(pickles, child.Scenario, tags, ruleBackgroundSteps, uri, language, newId) + } + } + } + return pickles + +} + +func compileScenarioOutline( + pickles []*messages.Pickle, + scenario *messages.Scenario, + inheritedTags []*messages.Tag, + backgroundSteps []*messages.Step, + uri string, + language string, + newId func() string, +) []*messages.Pickle { + for _, examples := range scenario.Examples { + if examples.TableHeader == nil { + continue + } + variableCells := examples.TableHeader.Cells + for _, valuesRow := range examples.TableBody { + valueCells := valuesRow.Cells + + computedPickleSteps := make([]*messages.PickleStep, 0) + pickleBackgroundSteps := make([]*messages.PickleStep, 0) + + if len(scenario.Steps) > 0 { + pickleBackgroundSteps = pickleSteps(backgroundSteps, newId) + } + + // translate computedPickleSteps based on valuesRow + previous := messages.PickleStepType_UNKNOWN + for _, step := range scenario.Steps { + text := step.Text + for i, variableCell := range variableCells { + text = strings.Replace(text, "<"+variableCell.Value+">", valueCells[i].Value, -1) + } + + pickleStep := pickleStep(step, variableCells, valuesRow, newId, previous) + previous = pickleStep.Type + computedPickleSteps = append(computedPickleSteps, pickleStep) + } + + // translate pickle name + name := scenario.Name + for i, key := range variableCells { + name = strings.Replace(name, "<"+key.Value+">", valueCells[i].Value, -1) + } + + if len(computedPickleSteps) > 0 { + computedPickleSteps = append(pickleBackgroundSteps, computedPickleSteps...) + } + + id := newId() + tags := pickleTags(append(inheritedTags, append(scenario.Tags, examples.Tags...)...)) + + pickles = append(pickles, &messages.Pickle{ + Id: id, + Uri: uri, + Steps: computedPickleSteps, + Tags: tags, + Name: name, + Language: language, + AstNodeIds: []string{scenario.Id, valuesRow.Id}, + }) + } + } + return pickles +} + +func compileScenario( + pickles []*messages.Pickle, + backgroundSteps []*messages.Step, + scenario *messages.Scenario, + inheritedTags []*messages.Tag, + uri string, + language string, + newId func() string, +) []*messages.Pickle { + steps := make([]*messages.PickleStep, 0) + if len(scenario.Steps) > 0 { + pickleBackgroundSteps := pickleSteps(backgroundSteps, newId) + steps = append(pickleBackgroundSteps, pickleSteps(scenario.Steps, newId)...) 
+ } + tags := pickleTags(append(inheritedTags, scenario.Tags...)) + id := newId() + pickles = append(pickles, &messages.Pickle{ + Id: id, + Uri: uri, + Steps: steps, + Tags: tags, + Name: scenario.Name, + Language: language, + AstNodeIds: []string{scenario.Id}, + }) + return pickles +} + +func pickleDataTable(table *messages.DataTable, variableCells []*messages.TableCell, valueCells []*messages.TableCell) *messages.PickleTable { + pickleTableRows := make([]*messages.PickleTableRow, len(table.Rows)) + for i, row := range table.Rows { + pickleTableCells := make([]*messages.PickleTableCell, len(row.Cells)) + for j, cell := range row.Cells { + pickleTableCells[j] = &messages.PickleTableCell{ + Value: interpolate(cell.Value, variableCells, valueCells), + } + } + pickleTableRows[i] = &messages.PickleTableRow{Cells: pickleTableCells} + } + return &messages.PickleTable{Rows: pickleTableRows} +} + +func pickleDocString(docString *messages.DocString, variableCells []*messages.TableCell, valueCells []*messages.TableCell) *messages.PickleDocString { + return &messages.PickleDocString{ + MediaType: interpolate(docString.MediaType, variableCells, valueCells), + Content: interpolate(docString.Content, variableCells, valueCells), + } +} + +func pickleTags(tags []*messages.Tag) []*messages.PickleTag { + ptags := make([]*messages.PickleTag, len(tags)) + for i, tag := range tags { + ptags[i] = &messages.PickleTag{ + Name: tag.Name, + AstNodeId: tag.Id, + } + } + return ptags +} + +func pickleSteps(steps []*messages.Step, newId func() string) []*messages.PickleStep { + pickleSteps := make([]*messages.PickleStep, len(steps)) + previous := messages.PickleStepType_UNKNOWN + for i, step := range steps { + pickleStep := pickleStep(step, nil, nil, newId, previous) + previous = pickleStep.Type + pickleSteps[i] = pickleStep + } + return pickleSteps +} + +func pickleStep( + step *messages.Step, + variableCells []*messages.TableCell, + valuesRow *messages.TableRow, + newId func() string, + previous messages.PickleStepType, +) *messages.PickleStep { + + var valueCells []*messages.TableCell + if valuesRow != nil { + valueCells = valuesRow.Cells + } + + pickleStep := &messages.PickleStep{ + Id: newId(), + Text: interpolate(step.Text, variableCells, valueCells), + Type: mapType(step.KeywordType, previous), + AstNodeIds: []string{step.Id}, + } + if valuesRow != nil { + pickleStep.AstNodeIds = append(pickleStep.AstNodeIds, valuesRow.Id) + } + if step.DataTable != nil { + pickleStep.Argument = &messages.PickleStepArgument{ + DataTable: pickleDataTable(step.DataTable, variableCells, valueCells), + } + } + if step.DocString != nil { + pickleStep.Argument = &messages.PickleStepArgument{ + DocString: pickleDocString(step.DocString, variableCells, valueCells), + } + } + return pickleStep +} + +func mapType(keywordType messages.StepKeywordType, previous messages.PickleStepType) messages.PickleStepType { + switch keywordType { + case messages.StepKeywordType_UNKNOWN: + return messages.PickleStepType_UNKNOWN + case messages.StepKeywordType_CONTEXT: + return messages.PickleStepType_CONTEXT + case messages.StepKeywordType_ACTION: + return messages.PickleStepType_ACTION + case messages.StepKeywordType_OUTCOME: + return messages.PickleStepType_OUTCOME + case messages.StepKeywordType_CONJUNCTION: + return previous + default: + panic("Bad enum value for StepKeywordType") + } +} + +func interpolate(s string, variableCells []*messages.TableCell, valueCells []*messages.TableCell) string { + if variableCells == nil || valueCells == nil { + 
return s + } + + for i, variableCell := range variableCells { + s = strings.Replace(s, "<"+variableCell.Value+">", valueCells[i].Value, -1) + } + + return s +} diff --git a/vendor/github.com/cucumber/gherkin/go/v26/test.feature b/vendor/github.com/cucumber/gherkin/go/v26/test.feature new file mode 100644 index 000000000..dff77a22b --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/test.feature @@ -0,0 +1,151 @@ +Feature: + + Scenario: scenario 1 + Given text + + Scenario: scenario 2 + Given text + + Scenario: scenario 3 + Given text + + Scenario: scenario 4 + Given text + + Scenario: scenario 5 + Given text + + Scenario: scenario 6 + Given text + + Scenario: scenario 7 + Given text + + Scenario: scenario 8 + Given text + + Scenario: scenario 9 + Given text + + Scenario: scenario 10 + Given text + + Scenario: scenario 11 + Given text + + Scenario: scenario 12 + Given text + + Scenario: scenario 13 + Given text + + Scenario: scenario 14 + Given text + + Scenario: scenario 15 + Given text + + Scenario: scenario 16 + Given text + + Scenario: scenario 17 + Given text + + Scenario: scenario 18 + Given text + + Scenario: scenario 19 + Given text + + Scenario: scenario 20 + Given text + + Scenario: scenario 21 + Given text + + Scenario: scenario 22 + Given text + + Scenario: scenario 23 + Given text + + Scenario: scenario 24 + Given text + + Scenario: scenario 25 + Given text + + Scenario: scenario 26 + Given text + + Scenario: scenario 27 + Given text + + Scenario: scenario 28 + Given text + + Scenario: scenario 29 + Given text + + Scenario: scenario 30 + Given text + + Scenario: scenario 31 + Given text + + Scenario: scenario 32 + Given text + + Scenario: scenario 33 + Given text + + Scenario: scenario 34 + Given text + + Scenario: scenario 35 + Given text + + Scenario: scenario 36 + Given text + + Scenario: scenario 37 + Given text + + Scenario: scenario 38 + Given text + + Scenario: scenario 39 + Given text + + Scenario: scenario 40 + Given text + + Scenario: scenario 41 + Given text + + Scenario: scenario 42 + Given text + + Scenario: scenario 43 + Given text + + Scenario: scenario 44 + Given text + + Scenario: scenario 45 + Given text + + Scenario: scenario 46 + Given text + + Scenario: scenario 47 + Given text + + Scenario: scenario 48 + Given text + + Scenario: scenario 49 + Given text + + Scenario: scenario 50 + Given text diff --git a/vendor/github.com/cucumber/gherkin/go/v26/test.sh b/vendor/github.com/cucumber/gherkin/go/v26/test.sh new file mode 100644 index 000000000..97debf647 --- /dev/null +++ b/vendor/github.com/cucumber/gherkin/go/v26/test.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env sh + +./bin/gherkin --no-ast --no-pickles test.feature | ./bin/gherkin --no-source --no-ast --json diff --git a/vendor/github.com/cucumber/godog/.gitignore b/vendor/github.com/cucumber/godog/.gitignore new file mode 100644 index 000000000..bd77fc9ff --- /dev/null +++ b/vendor/github.com/cucumber/godog/.gitignore @@ -0,0 +1,13 @@ +/cmd/godog/godog +/example/example +**/vendor/* +Gopkg.lock +Gopkg.toml + +.DS_Store +.idea +.vscode + +_artifacts + +vendor diff --git a/vendor/github.com/cucumber/godog/CHANGELOG.md b/vendor/github.com/cucumber/godog/CHANGELOG.md new file mode 100644 index 000000000..21a323e0a --- /dev/null +++ b/vendor/github.com/cucumber/godog/CHANGELOG.md @@ -0,0 +1,280 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](http://semver.org). 
+ +This document is formatted according to the principles of [Keep A CHANGELOG](http://keepachangelog.com). + +## Unreleased + +## [v0.15.1] + +### Added +- Step text is added to "step is undefined" error - ([669](https://github.com/cucumber/godog/pull/669) - [vearutop](https://github.com/vearutop)) +- Localisation support by @MegaGrindStone in https://github.com/cucumber/godog/pull/665 +- feat: support uint types by @chengxilo in https://github.com/cucumber/godog/pull/695 + +### Changed +- Replace deprecated `::set-output` - ([681](https://github.com/cucumber/godog/pull/681) - [nodeg](https://github.com/nodeg)) + +### Fixed +- fix(errors): Fix expected Step argument count for steps with `context.Context` ([679](https://github.com/cucumber/godog/pull/679) - [tigh-latte](https://github.com/tigh-latte)) +- fix(formatter): On concurrent execution, execute formatter at end of Scenario - ([645](https://github.com/cucumber/godog/pull/645) - [tigh-latte](https://github.com/tigh-latte)) +- Pretty printing results now prints the line where the step is declared instead of the line where the handler is declared. ([668](https://github.com/cucumber/godog/pull/668) - [SpencerC](https://github.com/SpencerC)) +- Update honnef.co/go/tools/cmd/staticcheck version in Makefile by @RezaZareiii in https://github.com/cucumber/godog/pull/670 +- fix: verify dogT exists in the context before using it by @cakoolen in https://github.com/cucumber/godog/pull/692 +- fix: change bang to being in README by @nahomEagleLion in https://github.com/cucumber/godog/pull/687 +- Mark junit test cases as skipped if no pickle step results available by @mrsheepuk in https://github.com/cucumber/godog/pull/597 + +## [v0.15.0] + +### Added +- Improved the type checking of step return types and improved the error messages - ([647](https://github.com/cucumber/godog/pull/647) - [johnlon](https://github.com/johnlon)) +- Ambiguous step definitions will now be detected when strict mode is activated - ([636](https://github.com/cucumber/godog/pull/636), [648](https://github.com/cucumber/godog/pull/648) - [johnlon](https://github.com/johnlon)) +- Provide support for attachments / embeddings, including a new example in the examples dir (see the sketch after this section) - ([623](https://github.com/cucumber/godog/pull/623) - [johnlon](https://github.com/johnlon)) + +### Changed +- Formatters now have a `Close` method, and the associated `io.Writer` changed to `io.WriteCloser`.
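The attachments / embeddings support added in v0.15.0 (PR 623 above) is driven from step definitions; below is a minimal sketch, assuming the `godog.Attach` / `godog.Attachment` API that PR introduced (the step text, payload, and file name are made up for illustration):

```go
package main

import (
	"context"

	"github.com/cucumber/godog"
)

// iTakeAScreenshot attaches an artifact to the running scenario via the
// context; formatters that understand embeddings can then report it.
func iTakeAScreenshot(ctx context.Context) (context.Context, error) {
	ctx = godog.Attach(ctx, godog.Attachment{
		Body:      []byte("fake image bytes"), // placeholder payload
		FileName:  "screenshot.png",           // hypothetical file name
		MediaType: "image/png",
	})
	return ctx, nil
}

func InitializeScenario(sc *godog.ScenarioContext) {
	sc.Step(`^I take a screenshot$`, iTakeAScreenshot)
}
```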
+ +## [v0.14.1] + +### Added +- Provide testing.T-compatible interface on test context, allowing usage of assertion libraries such as testify's assert/require - ([571](https://github.com/cucumber/godog/pull/571) - [mrsheepuk](https://github.com/mrsheepuk)) +- Created releasing guidelines - ([608](https://github.com/cucumber/godog/pull/608) - [glibas](https://github.com/glibas)) + +### Fixed +- Step duration calculation - ([616](https://github.com/cucumber/godog/pull/616) - [iaroslav-ciupin](https://github.com/iaroslav-ciupin)) +- Invalid memory address or nil pointer dereference in RetrieveFeatures - ([566](https://github.com/cucumber/godog/pull/566) - [corneldamian](https://github.com/corneldamian)) + +## [v0.14.0] +### Added +- Improve ErrSkip handling, add test for Summary and operations order ([584](https://github.com/cucumber/godog/pull/584) - [vearutop](https://github.com/vearutop)) + +### Fixed +- Remove line overwriting for scenario outlines in cucumber formatter ([605](https://github.com/cucumber/godog/pull/605) - [glibas](https://github.com/glibas)) +- Remove duplicate warning message ([590](https://github.com/cucumber/godog/pull/590) - [vearutop](https://github.com/vearutop)) +- Updated base formatter to set a scenario as passed unless there exist failed steps ([582](https://github.com/cucumber/godog/pull/582) - [roskee](https://github.com/roskee)) + +### Changed +- Update test.yml ([583](https://github.com/cucumber/godog/pull/583) - [vearutop](https://github.com/vearutop)) + +## [v0.13.0] +### Added +- Support for reading feature files from an `fs.FS` ([550](https://github.com/cucumber/godog/pull/550) - [tigh-latte](https://github.com/tigh-latte)) +- Added keyword functions. ([509](https://github.com/cucumber/godog/pull/509) - [otrava7](https://github.com/otrava7)) +- Prefer go test to use of godog cli in README ([548](https://github.com/cucumber/godog/pull/548) - [danielhelfand](https://github.com/danielhelfand)) +- Use `fs.FS` abstraction for filesystem ([550](https://github.com/cucumber/godog/pull/550) - [tigh-latte](https://github.com/tigh-latte)) +- Cancel context for each scenario ([514](https://github.com/cucumber/godog/pull/514) - [draganm](https://github.com/draganm)) + +### Fixed +- Improve hooks invocation flow ([568](https://github.com/cucumber/godog/pull/568) - [vearutop](https://github.com/vearutop)) +- Result of testing.T respects strict option ([539](https://github.com/cucumber/godog/pull/539) - [eiel](https://github.com/eiel)) + +### Changed +- BREAKING CHANGE, upgraded cucumber and messages dependencies - ([515](https://github.com/cucumber/godog/pull/515) - [otrava7](https://github.com/otrava7)) + +## [v0.12.6] +### Changed +- Each scenario is run with a cancellable `context.Context` which is cancelled at the end of the scenario. ([514](https://github.com/cucumber/godog/pull/514) - [draganm](https://github.com/draganm)) +- README example is updated with `context.Context` and `go test` usage. ([477](https://github.com/cucumber/godog/pull/477) - [vearutop](https://github.com/vearutop)) +- Removed deprecation of `godog.BindFlags`. ([498](https://github.com/cucumber/godog/pull/498) - [vearutop](https://github.com/vearutop)) +- Pretty Print when using rules.
([480](https://github.com/cucumber/godog/pull/480) - [dumpsterfireproject](https://github.com/dumpsterfireproject)) + +### Fixed +- Fixed a bug which would ignore the context returned from a substep. ([488](https://github.com/cucumber/godog/pull/488) - [wichert](https://github.com/wichert)) +- Fixed a bug which would cause a panic when using the pretty formatter with a feature that contained a rule. ([480](https://github.com/cucumber/godog/pull/480) - [dumpsterfireproject](https://github.com/dumpsterfireproject)) +- Multiple invocations of AfterScenario hooks in case of undefined steps. ([494](https://github.com/cucumber/godog/pull/494) - [vearutop](https://github.com/vearutop)) +- Add a check for missing test files and raise a more helpful error. ([468](https://github.com/cucumber/godog/pull/468) - [ALCooper12](https://github.com/ALCooper12)) +- Fix version subcommand. Do not print usage if run subcommand fails. ([475](https://github.com/cucumber/godog/pull/475) - [coopernurse](https://github.com/coopernurse)) + +### Added +- Add new option for creating features parsed from byte slices. ([476](https://github.com/cucumber/godog/pull/476) - [akaswenwilk](https://github.com/akaswenwilk)) + +### Deprecated +- `godog` CLI tool prints deprecation warning. ([489](https://github.com/cucumber/godog/pull/489) - [vearutop](https://github.com/vearutop)) + +## [v0.12.5] +### Changed +- Changed underlying cobra command setup to return errors instead of calling `os.Exit` directly to enable simpler testing. ([454](https://github.com/cucumber/godog/pull/454) - [mxygem](https://github.com/mxygem)) +- Remove use of deprecated methods from `_examples`. ([460](https://github.com/cucumber/godog/pull/460) - [ricardogarfe](https://github.com/ricardogarfe)) + +### Fixed +- Support for go1.18 in `godog` cli mode ([466](https://github.com/cucumber/godog/pull/466) - [vearutop](https://github.com/vearutop)) + +## [v0.12.4] +### Added +- Allow suite-level configuration of steps and hooks ([453](https://github.com/cucumber/godog/pull/453) - [vearutop](https://github.com/vearutop)) + +## [v0.12.3] +### Added +- Automated binary releases with GitHub Actions ([437](https://github.com/cucumber/godog/pull/437) - [vearutop](https://github.com/vearutop)) +- Automated binary versioning with `go install` ([437](https://github.com/cucumber/godog/pull/437) - [vearutop](https://github.com/vearutop)) +- Module with local replace in examples ([437](https://github.com/cucumber/godog/pull/437) - [vearutop](https://github.com/vearutop)) + +### Changed +- Suggest using `go install` instead of the deprecated `go get` to install the `godog` binary ([449](https://github.com/cucumber/godog/pull/449) - [dmitris](https://github.com/dmitris)) + +### Fixed +- After Scenario hook is called before After Step ([444](https://github.com/cucumber/godog/pull/444) - [vearutop](https://github.com/vearutop)) +- `check-go-version` in Makefile to run on WSL.
([443](https://github.com/cucumber/godog/pull/443) - [mxygem](https://github.com/mxygem)) + +## [v0.12.2] +### Fixed +- Error in `go mod tidy` with `GO111MODULE=off` ([436](https://github.com/cucumber/godog/pull/436) - [vearutop](https://github.com/vearutop)) + +## [v0.12.1] +### Fixed +- Unintended change of behavior in before step hook ([424](https://github.com/cucumber/godog/pull/424) - [nhatthm](https://github.com/nhatthm)) + +## [v0.12.0] +### Added +- Support for step definitions without return ([364](https://github.com/cucumber/godog/pull/364) - [titouanfreville](https://github.com/titouanfreville)) +- Contextualized hooks for scenarios and steps ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop)) +- Step result status in After hook ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop)) +- Support auto converting doc strings to plain strings ([380](https://github.com/cucumber/godog/pull/380) - [chirino](https://github.com/chirino)) +- Use multiple formatters in the same test run ([392](https://github.com/cucumber/godog/pull/392) - [vearutop](https://github.com/vearutop)) +- Added `RetrieveFeatures()` method to `godog.TestSuite` ([276](https://github.com/cucumber/godog/pull/276) - [radtriste](https://github.com/radtriste)) +- Added support to create custom formatters ([372](https://github.com/cucumber/godog/pull/372) - [leviable](https://github.com/leviable)) + +### Changed +- Upgraded gherkin-go to v19 and messages-go to v16 ([402](https://github.com/cucumber/godog/pull/402) - [mbow](https://github.com/mbow)) +- Generate simpler snippets that use *godog.DocString and *godog.Table ([379](https://github.com/cucumber/godog/pull/379) - [chirino](https://github.com/chirino)) + +### Deprecated +- `ScenarioContext.BeforeScenario`, use `ScenarioContext.Before` ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop)) +- `ScenarioContext.AfterScenario`, use `ScenarioContext.After` ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop)) +- `ScenarioContext.BeforeStep`, use `ScenarioContext.StepContext().Before` ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop)) +- `ScenarioContext.AfterStep`, use `ScenarioContext.StepContext().After` ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop)) + +### Fixed +- Incorrect step definition output for Data Tables ([411](https://github.com/cucumber/godog/pull/411) - [karfrank](https://github.com/karfrank)) +- `ScenarioContext.AfterStep` not invoked after a failed case ([409](https://github.com/cucumber/godog/pull/409) - [vearutop](https://github.com/vearutop)) +- Can't execute multiple specific scenarios in the same feature file ([414](https://github.com/cucumber/godog/pull/414) - [vearutop](https://github.com/vearutop)) + +## [v0.11.0] +### Added +- Created a simple example for a custom formatter ([330](https://github.com/cucumber/godog/pull/330) - [lonnblad](https://github.com/lonnblad)) +- --format junit:result.xml will now write to result.xml ([331](https://github.com/cucumber/godog/pull/331) - [lonnblad](https://github.com/lonnblad)) +- Added make commands to create artifacts and upload them to a GitHub release ([333](https://github.com/cucumber/godog/pull/333) - [lonnblad](https://github.com/lonnblad)) +- Created release notes and changelog for v0.11.0 ([355](https://github.com/cucumber/godog/pull/355) -
[lonnblad](https://github.com/lonnblad)) +- Created v0.11.0-rc2 ([362](https://github.com/cucumber/godog/pull/362) - [lonnblad](https://github.com/lonnblad)) + +### Changed +- Added Cobra for the Command Line Interface ([321](https://github.com/cucumber/godog/pull/321) - [lonnblad](https://github.com/lonnblad)) +- Added internal packages for formatters, storage and models ([323](https://github.com/cucumber/godog/pull/323) - [lonnblad](https://github.com/lonnblad)) +- Added an internal package for tags filtering ([326](https://github.com/cucumber/godog/pull/326) - [lonnblad](https://github.com/lonnblad)) +- Added an internal pkg for the builder ([327](https://github.com/cucumber/godog/pull/327) - [lonnblad](https://github.com/lonnblad)) +- Moved the parser code to a new internal pkg ([329](https://github.com/cucumber/godog/pull/329) - [lonnblad](https://github.com/lonnblad)) +- Moved StepDefinition to the formatters pkg ([332](https://github.com/cucumber/godog/pull/332) - [lonnblad](https://github.com/lonnblad)) +- Removed go1.12 and added go1.15 to CI config ([356](https://github.com/cucumber/godog/pull/356) - [lonnblad](https://github.com/lonnblad)) + +### Fixed +- Improved the help text of the formatter flag in the run command ([347](https://github.com/cucumber/godog/pull/347) - [lonnblad](https://github.com/lonnblad)) +- Removed $GOPATH from the README.md and updated the example ([349](https://github.com/cucumber/godog/pull/349) - [lonnblad](https://github.com/lonnblad)) +- Fixed the undefined step definitions help ([350](https://github.com/cucumber/godog/pull/350) - [lonnblad](https://github.com/lonnblad)) +- Added a comment regarding running the examples within the $GOPATH ([352](https://github.com/cucumber/godog/pull/352) - [lonnblad](https://github.com/lonnblad)) +- doc(FAQ/TestMain): `testing.M.Run()` is optional ([353](https://github.com/cucumber/godog/pull/353) - [hansbogert](https://github.com/hansbogert)) +- Made a fix for the unstable Randomize Run tests ([354](https://github.com/cucumber/godog/pull/354) - [lonnblad](https://github.com/lonnblad)) +- Fixed an issue when go test is parsing command-line flags ([359](https://github.com/cucumber/godog/pull/359) - [lonnblad](https://github.com/lonnblad)) +- Make pickleStepIDs unique across multiple paths ([366](https://github.com/cucumber/godog/pull/366) - [rickardenglund](https://github.com/rickardenglund)) + +### Removed +- Removed deprecated code ([322](https://github.com/cucumber/godog/pull/322) - [lonnblad](https://github.com/lonnblad)) + +## [v0.10.0] +### Added +- Added concurrency support to the pretty formatter ([275](https://github.com/cucumber/godog/pull/275) - [lonnblad](https://github.com/lonnblad)) +- Added concurrency support to the events formatter ([274](https://github.com/cucumber/godog/pull/274) - [lonnblad](https://github.com/lonnblad)) +- Added concurrency support to the cucumber formatter ([273](https://github.com/cucumber/godog/pull/273) - [lonnblad](https://github.com/lonnblad)) +- Added an example for how to use assertion pkgs like testify with godog ([289](https://github.com/cucumber/godog/pull/289) - [lonnblad](https://github.com/lonnblad)) +- Added the new TestSuiteInitializer and ScenarioInitializer ([294](https://github.com/cucumber/godog/pull/294) - [lonnblad](https://github.com/lonnblad)) +- Added an in-mem storage for pickles ([304](https://github.com/cucumber/godog/pull/304) - [lonnblad](https://github.com/lonnblad)) +- Added Pickle and PickleStep results to the in-mem storage
([305](https://github.com/cucumber/godog/pull/305) - [lonnblad](https://github.com/lonnblad)) +- Added features to the in-mem storage ([306](https://github.com/cucumber/godog/pull/306) - [lonnblad](https://github.com/lonnblad)) +- Broke out some code from massive files into new files ([307](https://github.com/cucumber/godog/pull/307) - [lonnblad](https://github.com/lonnblad)) +- Added support for concurrent scenarios ([311](https://github.com/cucumber/godog/pull/311) - [lonnblad](https://github.com/lonnblad)) + +### Changed +- Broke out snippets gen and added sorting on method name ([271](https://github.com/cucumber/godog/pull/271) - [lonnblad](https://github.com/lonnblad)) +- Updated so that we run all tests concurrently now ([278](https://github.com/cucumber/godog/pull/278) - [lonnblad](https://github.com/lonnblad)) +- Moved fmt tests to a godog_test pkg and restructured the fmt output tests ([295](https://github.com/cucumber/godog/pull/295) - [lonnblad](https://github.com/lonnblad)) +- Moved builder tests to a godog_test pkg ([296](https://github.com/cucumber/godog/pull/296) - [lonnblad](https://github.com/lonnblad)) +- Made the builder tests run in parallel ([298](https://github.com/cucumber/godog/pull/298) - [lonnblad](https://github.com/lonnblad)) +- Refactored suite_context.go ([300](https://github.com/cucumber/godog/pull/300) - [lonnblad](https://github.com/lonnblad)) +- Added better testing of the Context Initializers and TestSuite{}.Run() ([301](https://github.com/cucumber/godog/pull/301) - [lonnblad](https://github.com/lonnblad)) +- Updated the README.md ([302](https://github.com/cucumber/godog/pull/302) - [lonnblad](https://github.com/lonnblad)) +- Unexported some exported properties in unexported structs ([303](https://github.com/cucumber/godog/pull/303) - [lonnblad](https://github.com/lonnblad)) +- Refactored some states in the formatters and feature struct ([310](https://github.com/cucumber/godog/pull/310) - [lonnblad](https://github.com/lonnblad)) + +### Deprecated +- Deprecated SuiteContext and ConcurrentFormatter ([314](https://github.com/cucumber/godog/pull/314) - [lonnblad](https://github.com/lonnblad)) + +### Fixed +- Fixed failing builder tests due to the v0.9.0 change ([lonnblad](https://github.com/lonnblad)) +- Update paths to screenshots for examples ([270](https://github.com/cucumber/godog/pull/270) - [leviable](https://github.com/leviable)) +- Made progress formatter verification a bit more accurate ([lonnblad](https://github.com/lonnblad)) +- Added comparison between single and multi-threaded runs ([272](https://github.com/cucumber/godog/pull/272) - [lonnblad](https://github.com/lonnblad)) +- Fixed issue with empty feature file causing nil pointer deref ([288](https://github.com/cucumber/godog/pull/288) - [lonnblad](https://github.com/lonnblad)) +- Updated linting checks in circleci config and fixed linting issues ([290](https://github.com/cucumber/godog/pull/290) - [lonnblad](https://github.com/lonnblad)) +- Re-added some legacy doc for FeatureContext ([297](https://github.com/cucumber/godog/pull/297) - [lonnblad](https://github.com/lonnblad)) +- Fixed an issue with calculating time for junit testsuite ([308](https://github.com/cucumber/godog/pull/308) - [lonnblad](https://github.com/lonnblad)) +- Fixed so that we don't execute features with zero scenarios ([315](https://github.com/cucumber/godog/pull/315) - [lonnblad](https://github.com/lonnblad)) +- Fixed the broken --random flag ([317](https://github.com/cucumber/godog/pull/317) -
[lonnblad](https://github.com/lonnblad)) + +### Removed +- Removed pre go112 build code ([293](https://github.com/cucumber/godog/pull/293) - [lonnblad](https://github.com/lonnblad)) +- Removed the deprecated feature hooks ([312](https://github.com/cucumber/godog/pull/312) - [lonnblad](https://github.com/lonnblad)) + +## [0.9.0] +### Changed +- Run godog features in CircleCI in strict mode ([mxygem](https://github.com/mxygem)) +- Removed TestMain call in `suite_test.go` for CI. ([mxygem](https://github.com/mxygem)) +- Migrated to [gherkin-go - v11.0.0](https://github.com/cucumber/gherkin-go/releases/tag/v11.0.0). ([240](https://github.com/cucumber/godog/pull/240) - [lonnblad](https://github.com/lonnblad)) + +### Fixed +- Fixed the time attributes in the JUnit formatter. ([232](https://github.com/cucumber/godog/pull/232) - [lonnblad](https://github.com/lonnblad)) +- Re-enabled custom formatters. ([238](https://github.com/cucumber/godog/pull/238) - [ericmcbride](https://github.com/ericmcbride)) +- Added back suite_test.go ([mxygem](https://github.com/mxygem)) +- Normalise module paths for use on Windows ([242](https://github.com/cucumber/godog/pull/242) - [gjtaylor](https://github.com/gjtaylor)) +- Fixed panic in indenting function `s` ([247](https://github.com/cucumber/godog/pull/247) - [titouanfreville](https://github.com/titouanfreville)) +- Fixed wrong version in API example ([263](https://github.com/cucumber/godog/pull/263) - [denis-trofimov](https://github.com/denis-trofimov)) + +## [0.8.1] +### Added +- Link in Readme to the Slack community. ([210](https://github.com/cucumber/godog/pull/210) - [smikulcik](https://github.com/smikulcik)) +- Added run tests for Cucumber formatting. ([214](https://github.com/cucumber/godog/pull/214), [216](https://github.com/cucumber/godog/pull/216) - [lonnblad](https://github.com/lonnblad)) + +### Changed +- Renamed the `examples` directory to `_examples`, removing dependencies from the Go module ([218](https://github.com/cucumber/godog/pull/218) - [axw](https://github.com/axw)) + +### Fixed +- Find/Replaced references to DATA-DOG/godog -> cucumber/godog for docs. ([209](https://github.com/cucumber/godog/pull/209) - [smikulcik](https://github.com/smikulcik)) +- Fixed missing links in changelog to be correctly included! ([mxygem](https://github.com/mxygem)) + +## [0.8.0] +### Added +- Added initial CircleCI config. ([mxygem](https://github.com/mxygem)) +- Added concurrency support for JUnit formatting ([lonnblad](https://github.com/lonnblad)) + +### Changed +- Changed code references to DATA-DOG/godog to cucumber/godog to help get things building correctly.
([mxygem](https://github.com/mxygem)) + +[v0.15.1]: https://github.com/cucumber/godog/compare/v0.15.0...v0.15.1 +[v0.15.0]: https://github.com/cucumber/godog/compare/v0.14.1...v0.15.0 +[v0.14.1]: https://github.com/cucumber/godog/compare/v0.14.0...v0.14.1 +[v0.14.0]: https://github.com/cucumber/godog/compare/v0.13.0...v0.14.0 +[v0.13.0]: https://github.com/cucumber/godog/compare/v0.12.6...v0.13.0 +[v0.12.6]: https://github.com/cucumber/godog/compare/v0.12.5...v0.12.6 +[v0.12.5]: https://github.com/cucumber/godog/compare/v0.12.4...v0.12.5 +[v0.12.4]: https://github.com/cucumber/godog/compare/v0.12.3...v0.12.4 +[v0.12.3]: https://github.com/cucumber/godog/compare/v0.12.2...v0.12.3 +[v0.12.2]: https://github.com/cucumber/godog/compare/v0.12.1...v0.12.2 +[v0.12.1]: https://github.com/cucumber/godog/compare/v0.12.0...v0.12.1 +[v0.12.0]: https://github.com/cucumber/godog/compare/v0.11.0...v0.12.0 +[v0.11.0]: https://github.com/cucumber/godog/compare/v0.10.0...v0.11.0 +[v0.10.0]: https://github.com/cucumber/godog/compare/v0.9.0...v0.10.0 +[0.9.0]: https://github.com/cucumber/godog/compare/v0.8.1...v0.9.0 +[0.8.1]: https://github.com/cucumber/godog/compare/v0.8.0...v0.8.1 +[0.8.0]: https://github.com/cucumber/godog/compare/v0.7.13...v0.8.0 diff --git a/vendor/github.com/cucumber/godog/CHANGELOG_OLD.md b/vendor/github.com/cucumber/godog/CHANGELOG_OLD.md new file mode 100644 index 000000000..070337965 --- /dev/null +++ b/vendor/github.com/cucumber/godog/CHANGELOG_OLD.md @@ -0,0 +1,113 @@ +# Change LOG + +**2020-02-06** +- move to new [CHANGELOG.md](CHANGELOG.md) + +**2020-01-31** +- change license to MIT and move the project repository to the **cucumber** organization. + +**2018-11-16** +- added formatter output test suite, currently mainly pretty format tested. +- these tests helped to identify some output format issues. + +**2018-11-12** +- proper go module support added for `godog` command build. +- added build tests. + +**2018-10-27** +- support go1.11 new compiler and linker changes for **godog** command. +- support go1.11 modules and `go mod` builds. +- `BindFlags` now has a prefix option for flags, so that `go test` command can avoid flag name collisions. +- `BindFlags` respects default options provided for binding, so that it does not override predefined options when flags are bound, see #144. +- Minor patch to support tag filters on example tables for ScenarioOutline. +- Minor patch for pretty printer: when a scenario had no steps, comment position computation panicked. + +**2018-03-04** +- support go1.10 new compiler and linker changes for **godog** command. + +**2017-08-31** +- added **BeforeFeature** and **AfterFeature** hooks. +- failed multistep error is now prepended with the parent step text in order to determine the failed nested step. +- pretty format now removes the step definition location package name in the comment next to a step if the step definition matches the tested package. If the step definition is imported from another package, the full package name will be printed. + +**2017-05-04** +- added **--strict** option in order to fail the suite when there are pending or undefined steps. By default, the suite passes and treats pending or undefined steps as TODOs. + +**2017-04-29** - **v0.7.0** +- added support for nested steps. From now on, it is possible to return **godog.Steps** instead of an **error** in the step definition func. This change introduced a few minor changes in the **Formatter** interface. Be sure to adapt the changes if you have custom formatters.
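The nested-steps support described in the v0.7.0 entry lets a step definition return `godog.Steps` (a slice of step texts) instead of an error, and godog then executes each listed step as part of the parent step. A minimal sketch in today's API; the step texts here are made up for illustration and would have to be defined elsewhere:

```go
package main

import "github.com/cucumber/godog"

// iCheckOutMyCart is a composite step: returning godog.Steps makes godog
// run the listed steps in order as nested steps of this one.
func iCheckOutMyCart() godog.Steps {
	return godog.Steps{
		"I review my cart",   // hypothetical step defined elsewhere
		"I pay for my order", // hypothetical step defined elsewhere
	}
}

func InitializeScenario(sc *godog.ScenarioContext) {
	sc.Step(`^I check out my cart$`, iCheckOutMyCart)
}
```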
+ +**2017-04-27** +- added an option to randomize scenario execution order, so we could ensure that scenarios do not depend on global state. +- godog was manually sorting feature files by name. Now it just runs them in the given order; you may sort them any way you like. For example `godog $(find . -name '*.feature' | sort)` + +**2016-10-30** - **v0.6.0** +- added experimental **events** format, which might be used for unified cucumber formats. It should not be adopted widely yet, since it is highly possible that the specification will change. +- added **RunWithOptions** method which allows easily running godog from **TestMain** without needing to simulate flag arguments. These options also allow configuring the output writer. +- added flag **-o, --output=runner.binary** which only compiles the test runner executable, but does not execute it. +- **FlagSet** initialization now takes io.Writer as output for help text output. It was not showing nice colors on Windows before. **--no-colors** option only applies to test run output. + +**2016-06-14** - **v0.5.0** +- godog now uses **go tool compile** and **go tool link** to support vendor directory dependencies. It also compiles the test executable the same way as the standard **go test** utility. With this change, only go versions from **1.5** are now supported. + +**2016-06-01** +- parse flags in main command, to show version and help without needing to compile the test package and buildable go sources. + +**2016-05-28** +- show nicely formatted called step func name and file path + +**2016-05-26** +- pack gherkin dependency in a subpackage to prevent compatibility conflicts in the future. If you recently upgraded, you will probably need to reference gherkin as `github.com/DATA-DOG/godog/gherkin` instead. + +**2016-05-25** +- refactored test suite build tooling in order to use the standard **go test** tool, which allows compiling a package with the godog runner script in the **go** idiomatic way. It also supports all build environment options as usual. +- **godog.Run** now returns an **int** exit status. It was not returning anything before, so there are no compatibility breaks. + +**2016-03-04** +- added **junit** compatible output formatter, which prints **xml** results to **os.Stdout** +- fixed #14 which skipped printing background steps when there was a scenario outline in the feature. + +**2015-07-03** +- changed **godog.Suite** from interface to struct. Context registration should be updated accordingly. The reason for the change: since it exports the same methods and there is no need to mock a function in tests, there is no obvious reason to keep an interface. +- in order to support running suites concurrently, the entry point of the application needed to be refactored. The **Run** method is now a func of the godog package which initializes and runs the suite (or more suites). Method **New** is removed. This change made godog a little cleaner. +- renamed **RegisterFormatter** func to **Format** to be more consistent. + diff --git a/vendor/github.com/cucumber/godog/CONTRIBUTING.md b/vendor/github.com/cucumber/godog/CONTRIBUTING.md new file mode 100644 index 000000000..c21ee4263 --- /dev/null +++ b/vendor/github.com/cucumber/godog/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# Welcome 💖 + +Before anything else, thank you for taking some of your precious time to help this project move forward. ❤️ + +If you're new to open source and feeling a bit nervous 😳, we understand!
We recommend watching [this excellent guide](https://egghead.io/talks/git-how-to-make-your-first-open-source-contribution) +to give you a grounding in some of the basic concepts. You could also watch [this talk](https://www.youtube.com/watch?v=tuSk6dMoTIs) from our very own wonderful [Marit van Dijk](https://github.com/mlvandijk) on her experiences contributing to Cucumber. + +We want you to feel safe to make mistakes and ask questions. If anything in this guide or anywhere else in the codebase doesn't make sense to you, please let us know! It's through your feedback that we can make this codebase more welcoming, so we'll be glad to hear your thoughts. + +You can chat with us in the `#committers` channel in our [community Discord](https://cucumber.io/docs/community/get-in-touch/#discord), or feel free to [raise an issue] if you're experiencing any friction trying to make your contribution. + +## Setup + +To get your development environment set up, you'll need to [install Go]. We're currently using version 1.17 for development. + +Once that's done, try running the tests: + + make test + +If everything passes, you're ready to hack! + +[install go]: https://golang.org/doc/install +[community Discord]: https://cucumber.io/community#discord +[raise an issue]: https://github.com/cucumber/godog/issues/new/choose + +## Changing dependencies + +If dependencies have changed, you will also need to update the _examples module. `go mod tidy` should be sufficient. \ No newline at end of file diff --git a/vendor/github.com/josharian/intern/license.md b/vendor/github.com/cucumber/godog/LICENSE similarity index 95% rename from vendor/github.com/josharian/intern/license.md rename to vendor/github.com/cucumber/godog/LICENSE index 353d3055f..97dcbd65f 100644 --- a/vendor/github.com/josharian/intern/license.md +++ b/vendor/github.com/cucumber/godog/LICENSE @@ -1,6 +1,6 @@ -MIT License +The MIT License (MIT) -Copyright (c) 2019 Josh Bleecher Snyder +Copyright (c) SmartBear Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/cucumber/godog/Makefile b/vendor/github.com/cucumber/godog/Makefile new file mode 100644 index 000000000..06c95c4e0 --- /dev/null +++ b/vendor/github.com/cucumber/godog/Makefile @@ -0,0 +1,77 @@ +.PHONY: test gherkin bump cover + +VERS ?= $(shell git symbolic-ref -q --short HEAD || git describe --tags --exact-match) + +GO_MAJOR_VERSION = $(shell go version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f1) +GO_MINOR_VERSION = $(shell go version | cut -c 14- | cut -d' ' -f1 | cut -d'.' -f2) +MINIMUM_SUPPORTED_GO_MAJOR_VERSION = 1 +MINIMUM_SUPPORTED_GO_MINOR_VERSION = 16 +GO_VERSION_VALIDATION_ERR_MSG = Go version $(GO_MAJOR_VERSION).$(GO_MINOR_VERSION) is not supported, please update to at least $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION).$(MINIMUM_SUPPORTED_GO_MINOR_VERSION) + +.PHONY: check-go-version +check-go-version: + @if [ $(GO_MAJOR_VERSION) -gt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \ + exit 0 ;\ + elif [ $(GO_MAJOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) ]; then \ + echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ + exit 1; \ + elif [ $(GO_MINOR_VERSION) -lt $(MINIMUM_SUPPORTED_GO_MINOR_VERSION) ] ; then \ + echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ + exit 1; \ + fi + +test: check-go-version + @echo "running all tests" + @go fmt ./...
+ @go run honnef.co/go/tools/cmd/staticcheck@v0.5.1 github.com/cucumber/godog + @go run honnef.co/go/tools/cmd/staticcheck@v0.5.1 github.com/cucumber/godog/cmd/godog + go vet ./... + go test -race ./... + go run ./cmd/godog -f progress -c 4 + +gherkin: + @if [ -z "$(VERS)" ]; then echo "Provide gherkin version like: 'VERS=commit-hash'"; exit 1; fi + @rm -rf gherkin + @mkdir gherkin + @curl -s -L https://github.com/cucumber/gherkin-go/tarball/$(VERS) | tar -C gherkin -zx --strip-components 1 + @rm -rf gherkin/{.travis.yml,.gitignore,*_test.go,gherkin-generate*,*.razor,*.jq,Makefile,CONTRIBUTING.md} + +bump: + @if [ -z "$(VERSION)" ]; then echo "Provide version like: 'VERSION=$(VERS) make bump'"; exit 1; fi + @echo "bumping version from: $(VERS) to $(VERSION)" + @sed -i.bak 's/$(VERS)/$(VERSION)/g' godog.go + @sed -i.bak 's/$(VERS)/$(VERSION)/g' _examples/api/features/version.feature + @find . -name '*.bak' | xargs rm + +cover: + go test -race -coverprofile=coverage.txt + go tool cover -html=coverage.txt + rm coverage.txt + +ARTIFACT_DIR := _artifacts + +# To upload artifacts for the current version; +# execute: make upload +# +# Check https://github.com/tcnksm/ghr for usage of ghr +upload: artifacts + ghr -replace $(VERS) $(ARTIFACT_DIR) + +# To build artifacts for the current version; +# execute: make artifacts +artifacts: + rm -rf $(ARTIFACT_DIR) + mkdir $(ARTIFACT_DIR) + + $(call _build,darwin,amd64) + $(call _build,linux,amd64) + $(call _build,linux,arm64) + +define _build + mkdir $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2 + env GOOS=$1 GOARCH=$2 go build -ldflags "-X github.com/cucumber/godog.Version=$(VERS)" -o $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2/godog ./cmd/godog + cp README.md $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2/README.md + cp LICENSE $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2/LICENSE + cd $(ARTIFACT_DIR) && tar -c --use-compress-program="pigz --fast" -f godog-$(VERS)-$1-$2.tar.gz godog-$(VERS)-$1-$2 && cd .. + rm -rf $(ARTIFACT_DIR)/godog-$(VERS)-$1-$2 +endef diff --git a/vendor/github.com/cucumber/godog/README.md b/vendor/github.com/cucumber/godog/README.md new file mode 100644 index 000000000..dceacf167 --- /dev/null +++ b/vendor/github.com/cucumber/godog/README.md @@ -0,0 +1,583 @@ +[![#StandWithUkraine](https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/badges/StandWithUkraine.svg)](https://vshymanskyy.github.io/StandWithUkraine) +[![Build Status](https://github.com/cucumber/godog/workflows/test/badge.svg)](https://github.com/cucumber/godog/actions?query=branch%3Amain+workflow%3Atest) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/cucumber/godog)](https://pkg.go.dev/github.com/cucumber/godog) +[![codecov](https://codecov.io/gh/cucumber/godog/branch/master/graph/badge.svg)](https://codecov.io/gh/cucumber/godog) +[![pull requests](https://oselvar.com/api/badge?label=pull%20requests&csvUrl=https%3A%2F%2Fraw.githubusercontent.com%2Fcucumber%2Foselvar-github-metrics%2Fmain%2Fdata%2Fcucumber%2Fgodog%2FpullRequests.csv)](https://oselvar.com/github/cucumber/oselvar-github-metrics/main/cucumber/godog) +[![issues](https://oselvar.com/api/badge?label=issues&csvUrl=https%3A%2F%2Fraw.githubusercontent.com%2Fcucumber%2Foselvar-github-metrics%2Fmain%2Fdata%2Fcucumber%2Fgodog%2Fissues.csv)](https://oselvar.com/github/cucumber/oselvar-github-metrics/main/cucumber/godog) + +# Godog + +
[Godog logo]
+
+**The API is likely to change a few times before we reach 1.0.0**
+
+Please read the full README; you may find it very useful. And do not forget to peek into the [Release Notes](https://github.com/cucumber/godog/blob/master/release-notes) and the [CHANGELOG](https://github.com/cucumber/godog/blob/master/CHANGELOG.md) from time to time.
+
+Package godog is the official Cucumber BDD framework for Golang. It merges specification and test documentation into one cohesive whole, using Gherkin-formatted scenarios written as Given, When, Then.
+
+The project was inspired by [behat][behat] and [cucumber][cucumber].
+
+## Why Godog/Cucumber
+
+### A single source of truth
+
+Godog merges specification and test documentation into one cohesive whole.
+
+### Living documentation
+
+Because they're automatically tested by Godog, your specifications are
+always up-to-date.
+
+### Focus on the customer
+
+Business and IT don't always understand each other. Godog's executable specifications encourage closer collaboration, helping teams keep the business goal in mind at all times.
+
+### Less rework
+
+When automated testing is this much fun, teams can easily protect themselves from costly regressions.
+
+### Read more
+- [Behaviour-Driven Development](https://cucumber.io/docs/bdd/)
+- [Gherkin Reference](https://cucumber.io/docs/gherkin/reference/)
+
+## Contributions
+
+Godog is a community-driven Open Source project within the Cucumber organization. We [welcome contributions from everyone](https://cucumber.io/blog/open-source/tackling-structural-racism-(and-sexism)-in-open-so/), and we're ready to support you if you have the enthusiasm to contribute.
+
+See the [contributing guide] for more detail on how to get started.
+
+See the [releasing guide] for release flow details.
+
+## Getting help
+
+We have a [community Discord](https://cucumber.io/docs/community/get-in-touch/#discord) where you can chat with other users, developers, and BDD practitioners.
+
+## Examples
+
+You can find a few examples [here](/_examples).
+
+**Note** that if you want to execute any of the examples and have the Git repository checked out in the `$GOPATH`, you need to use: `GO111MODULE=off`. See this [issue](https://github.com/cucumber/godog/issues/344) for reference.
+
+### Godogs
+
+The following example can be [found here](/_examples/godogs).
+
+#### Step 1 - Set up a go module
+
+Create a new go module named **godogs** in your go workspace by running `mkdir godogs`
+
+From now on, use **godogs** as your working directory by running `cd godogs`
+
+Initialize the go module inside the **godogs** directory by running `go mod init godogs`
+
+#### Step 2 - Create gherkin feature
+
+Imagine we have a **godog cart** to serve godogs for lunch.
+
+First of all, we describe our feature in plain text:
+
+``` gherkin
+Feature: eat godogs
+  In order to be happy
+  As a hungry gopher
+  I need to be able to eat godogs
+
+  Scenario: Eat 5 out of 12
+    Given there are 12 godogs
+    When I eat 5
+    Then there should be 7 remaining
+```
+
+Run `vim features/godogs.feature`, add the text above in the editor, and save the file.
+
+#### Step 3 - Create godog step definitions
+
+**NOTE:** As with **go test**, godog respects package-level isolation. All your step definitions should be in your tested package's root directory. In this case: **godogs**.
+
+Create and copy the step definitions below into a new file by running `vim godogs_test.go`:
+``` go
+package main
+
+import "github.com/cucumber/godog"
+
+func iEat(arg1 int) error {
+	return godog.ErrPending
+}
+
+func thereAreGodogs(arg1 int) error {
+	return godog.ErrPending
+}
+
+func thereShouldBeRemaining(arg1 int) error {
+	return godog.ErrPending
+}
+
+func InitializeScenario(ctx *godog.ScenarioContext) {
+	ctx.Step(`^there are (\d+) godogs$`, thereAreGodogs)
+	ctx.Step(`^I eat (\d+)$`, iEat)
+	ctx.Step(`^there should be (\d+) remaining$`, thereShouldBeRemaining)
+}
+```
+
+Alternatively, you can also specify the keyword (Given, When, Then...) when creating the step definitions:
+``` go
+func InitializeScenario(ctx *godog.ScenarioContext) {
+	ctx.Given(`^there are (\d+) godogs$`, thereAreGodogs)
+	ctx.When(`^I eat (\d+)$`, iEat)
+	ctx.Then(`^there should be (\d+) remaining$`, thereShouldBeRemaining)
+}
+```
+
+Our module should now look like this:
+```
+godogs
+- features
+  - godogs.feature
+- go.mod
+- go.sum
+- godogs_test.go
+```
+
+Run `go test` in the **godogs** directory to run the steps you have defined. You should now see that the scenario runs
+with a warning stating there are no tests to run.
+```
+testing: warning: no tests to run
+PASS
+ok  godogs  0.225s
+```
+
+By adding some logic to these steps, you will be able to thoroughly test the feature you just defined.
+
+#### Step 4 - Create the main program to test
+
+Let's keep it simple by only requiring a count of **godogs** for now.
+
+Create and copy the code below into a new file by running `vim godogs.go`:
+```go
+package main
+
+// Godogs available to eat
+var Godogs int
+
+func main() { /* usual main func */ }
+```
+
+Our module should now look like this:
+```
+godogs
+- features
+  - godogs.feature
+- go.mod
+- go.sum
+- godogs.go
+- godogs_test.go
+```
+
+#### Step 5 - Add some logic to the step definitions
+
+Now let's implement our step definitions to test our feature requirements.
+
+Replace the contents of `godogs_test.go` with the code below by running `vim godogs_test.go`.
+
+```go
+package main
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/cucumber/godog"
+)
+
+// godogsCtxKey is the key used to store the available godogs in the context.Context.
+type godogsCtxKey struct{}
+
+func thereAreGodogs(ctx context.Context, available int) (context.Context, error) {
+	return context.WithValue(ctx, godogsCtxKey{}, available), nil
+}
+
+func iEat(ctx context.Context, num int) (context.Context, error) {
+	available, ok := ctx.Value(godogsCtxKey{}).(int)
+	if !ok {
+		return ctx, errors.New("there are no godogs available")
+	}
+
+	if available < num {
+		return ctx, fmt.Errorf("you cannot eat %d godogs, there are %d available", num, available)
+	}
+
+	available -= num
+
+	return context.WithValue(ctx, godogsCtxKey{}, available), nil
+}
+
+func thereShouldBeRemaining(ctx context.Context, remaining int) error {
+	available, ok := ctx.Value(godogsCtxKey{}).(int)
+	if !ok {
+		return errors.New("there are no godogs available")
+	}
+
+	if available != remaining {
+		return fmt.Errorf("expected %d godogs to be remaining, but there is %d", remaining, available)
+	}
+
+	return nil
+}
+
+func TestFeatures(t *testing.T) {
+	suite := godog.TestSuite{
+		ScenarioInitializer: InitializeScenario,
+		Options: &godog.Options{
+			Format:   "pretty",
+			Paths:    []string{"features"},
+			TestingT: t, // Testing instance that will run subtests.
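+			// Optional godog.Options fields commonly set here, shown as
+			// comments for illustration (Tags and Concurrency are covered
+			// in the FAQ and Concurrency sections below):
+			//   Tags:        "@wip", // run only scenarios tagged @wip
+			//   Concurrency: 4,      // run scenarios concurrently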
+		},
+	}
+
+	if suite.Run() != 0 {
+		t.Fatal("non-zero status returned, failed to run feature tests")
+	}
+}
+
+func InitializeScenario(sc *godog.ScenarioContext) {
+	sc.Step(`^there are (\d+) godogs$`, thereAreGodogs)
+	sc.Step(`^I eat (\d+)$`, iEat)
+	sc.Step(`^there should be (\d+) remaining$`, thereShouldBeRemaining)
+}
+```
+
+In this example, we are using `context.Context` to pass the state between the steps.
+Every scenario starts with an empty context, and then steps and hooks can add relevant information to it.
+The instrumented context is chained through the steps and hooks, and is safe to use when multiple scenarios are running concurrently.
+
+When you run godog again with `go test -v godogs_test.go`, you should see a passing run:
+```
+=== RUN   TestFeatures
+Feature: eat godogs
+  In order to be happy
+  As a hungry gopher
+  I need to be able to eat godogs
+=== RUN   TestFeatures/Eat_5_out_of_12
+
+  Scenario: Eat 5 out of 12          # features/godogs.feature:6
+    Given there are 12 godogs        # godogs_test.go:15 -> command-line-arguments.thereAreGodogs
+    When I eat 5                     # godogs_test.go:19 -> command-line-arguments.iEat
+    Then there should be 7 remaining # godogs_test.go:34 -> command-line-arguments.thereShouldBeRemaining
+
+1 scenarios (1 passed)
+3 steps (3 passed)
+279.917µs
+--- PASS: TestFeatures (0.00s)
+    --- PASS: TestFeatures/Eat_5_out_of_12 (0.00s)
+PASS
+ok  command-line-arguments  0.164s
+```
+
+You may hook into the `ScenarioContext` **Before** event in order to reset or pre-seed the application state before each scenario.
+You may hook into more events, like `sc.StepContext()` **After** to print all state in case of an error,
+or **BeforeSuite** to prepare a database (a minimal hook sketch appears further below).
+
+By now, you should have figured out how to use **godog**. Another piece of advice is to make steps orthogonal, small, and simple to read for a user,
+whether that user is a non-technical website user or an API developer who understands a little more technical context: the steps should target that user.
+
+When steps are orthogonal and small, you can combine them just like you do with Unix tools. Look for steps that can be simplified or removed by composing others.
+
+`TestFeatures` acts as a regular Go test, so you can leverage your IDE facilities to run and debug it.
+
+### Attachments
+
+An example showing how to make attachments (aka embeddings) to the results is shown in [_examples/attachments](/_examples/attachments/).
+
+## Code of Conduct
+
+Everyone interacting in this codebase and issue tracker is expected to follow the Cucumber [code of conduct](https://github.com/cucumber/cucumber/blob/master/CODE_OF_CONDUCT.md).
+
+## References and Tutorials
+
+- [cucumber-html-reporter](https://github.com/gkushang/cucumber-html-reporter),
+  may be used in order to generate **html** reports together with the **cucumber** output formatter. See the [following docker image](https://github.com/myie/cucumber-html-reporter) for usage details.
+- [how to use godog by semaphoreci](https://semaphoreci.com/community/tutorials/how-to-use-godog-for-behavior-driven-development-in-go)
+- see [examples](https://github.com/cucumber/godog/tree/master/_examples)
+- see extension [AssistDog](https://github.com/hellomd/assistdog),
+  which may have useful **gherkin.DataTable** transformations or comparison methods for assertions.
+
+## Documentation
+
+See [pkg documentation][godoc] for general API details.
+See **[Circle Config](/.circleci/config.yml)** for supported **go** versions.
+See `godog -h` for general command options.
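+
+As a reference for the hook events mentioned in the tutorial above, here is a minimal sketch of hook registration (assuming the v0.12+ hook API; the handler bodies are illustrative only and reuse `godogsCtxKey` from the example):
+
+```go
+func InitializeTestSuite(ctx *godog.TestSuiteContext) {
+	ctx.BeforeSuite(func() {
+		// e.g. prepare a database once, before the whole suite runs
+	})
+}
+
+func InitializeScenario(sc *godog.ScenarioContext) {
+	// Reset or pre-seed the application state before each scenario.
+	sc.Before(func(ctx context.Context, s *godog.Scenario) (context.Context, error) {
+		return context.WithValue(ctx, godogsCtxKey{}, 0), nil
+	})
+
+	// Inspect the result after each scenario, e.g. to dump state on error.
+	sc.After(func(ctx context.Context, s *godog.Scenario, err error) (context.Context, error) {
+		return ctx, nil
+	})
+}
+```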
+
+See implementation examples:
+
+- [rest API server](/_examples/api)
+- [rest API with Database](/_examples/db)
+- [godogs](/_examples/godogs)
+
+## FAQ
+
+### Running Godog with go test
+
+You may integrate running **godog** into your **go test** command.
+
+#### Subtests of *testing.T
+
+You can run the test suite using go [Subtests](https://pkg.go.dev/testing#hdr-Subtests_and_Sub_benchmarks).
+In this case it is not necessary to have the **godog** command installed. See the following example.
+
+```go
+package main_test
+
+import (
+	"testing"
+
+	"github.com/cucumber/godog"
+)
+
+func TestFeatures(t *testing.T) {
+	suite := godog.TestSuite{
+		ScenarioInitializer: func(s *godog.ScenarioContext) {
+			// Add step definitions here.
+		},
+		Options: &godog.Options{
+			Format:   "pretty",
+			Paths:    []string{"features"},
+			TestingT: t, // Testing instance that will run subtests.
+		},
+	}
+
+	if suite.Run() != 0 {
+		t.Fatal("non-zero status returned, failed to run feature tests")
+	}
+}
+```
+
+Then you can run the suite:
+```
+go test -test.v -test.run ^TestFeatures$
+```
+
+Or a particular scenario:
+```
+go test -test.v -test.run ^TestFeatures$/^my_scenario$
+```
+
+#### TestMain
+
+You can run the test suite using the go [TestMain](https://golang.org/pkg/testing/#hdr-Main) func, available since **go 1.4**.
+In this case it is not necessary to have the **godog** command installed. See the following examples.
+
+The following example binds **godog** flags with the specified prefix `godog` in order to prevent flag collisions.
+
+```go
+package main
+
+import (
+	"os"
+	"testing"
+
+	"github.com/cucumber/godog"
+	"github.com/cucumber/godog/colors"
+	"github.com/spf13/pflag" // godog v0.11.0 and later
+)
+
+var opts = godog.Options{
+	Output: colors.Colored(os.Stdout),
+	Format: "progress", // can define default values
+}
+
+func init() {
+	godog.BindFlags("godog.", pflag.CommandLine, &opts) // godog v0.10.0 and earlier
+	godog.BindCommandLineFlags("godog.", &opts)         // godog v0.11.0 and later
+}
+
+func TestMain(m *testing.M) {
+	pflag.Parse()
+	opts.Paths = pflag.Args()
+
+	status := godog.TestSuite{
+		Name:                 "godogs",
+		TestSuiteInitializer: InitializeTestSuite,
+		ScenarioInitializer:  InitializeScenario,
+		Options:              &opts,
+	}.Run()
+
+	// Optional: Run `testing` package's logic besides godog.
+	if st := m.Run(); st > status {
+		status = st
+	}
+
+	os.Exit(status)
+}
+```
+
+Then you may run tests by specifying flags in order to filter features.
+
+```
+go test -v --godog.random --godog.tags=wip
+go test -v --godog.format=pretty --godog.random -race -coverprofile=coverage.txt -covermode=atomic
+```
+
+The following example does not bind godog flags; instead it manually configures the needed options.
+
+```go
+func TestMain(m *testing.M) {
+	opts := godog.Options{
+		Format:    "progress",
+		Paths:     []string{"features"},
+		Randomize: time.Now().UTC().UnixNano(), // randomize scenario execution order
+	}
+
+	status := godog.TestSuite{
+		Name:                 "godogs",
+		TestSuiteInitializer: InitializeTestSuite,
+		ScenarioInitializer:  InitializeScenario,
+		Options:              &opts,
+	}.Run()
+
+	// Optional: Run `testing` package's logic besides godog.
+	if st := m.Run(); st > status {
+		status = st
+	}
+
+	os.Exit(status)
+}
+```
+
+You can even go one step further and reuse **go test** flags, like **verbose** mode, in order to switch the godog **format**.
See the following example:
+
+```go
+func TestMain(m *testing.M) {
+	format := "progress"
+	for _, arg := range os.Args[1:] {
+		if arg == "-test.v=true" { // go test transforms -v option
+			format = "pretty"
+			break
+		}
+	}
+
+	opts := godog.Options{
+		Format: format,
+		Paths:  []string{"features"},
+	}
+
+	status := godog.TestSuite{
+		Name:                 "godogs",
+		TestSuiteInitializer: InitializeTestSuite,
+		ScenarioInitializer:  InitializeScenario,
+		Options:              &opts,
+	}.Run()
+
+	// Optional: Run `testing` package's logic besides godog.
+	if st := m.Run(); st > status {
+		status = st
+	}
+
+	os.Exit(status)
+}
+```
+
+Now, when running `go test -v`, it will use the **pretty** format.
+
+### Tags
+
+If you want to filter scenarios by tags, you can use the `-t=<expression>` or `--tags=<expression>` option, where `<expression>` is one of the following:
+
+- `@wip` - run all scenarios with wip tag
+- `~@wip` - exclude all scenarios with wip tag
+- `@wip && ~@new` - run wip scenarios, but exclude new
+- `@wip,@undone` - run wip or undone scenarios
+
+### Using assertion packages like testify with Godog
+A more extensive example can be [found here](/_examples/assert-godogs).
+
+```go
+func thereShouldBeRemaining(ctx context.Context, remaining int) error {
+	assert.Equal(
+		godog.T(ctx), Godogs, remaining,
+		"Expected %d godogs to be remaining, but there is %d", remaining, Godogs,
+	)
+	return nil
+}
+```
+
+### Embeds
+
+If you're looking to compile your test binary in advance of running, you can compile the feature files into the binary via `go:embed`:
+
+```go
+
+//go:embed features/*
+var features embed.FS
+
+var opts = godog.Options{
+	Paths: []string{"features"},
+	FS:    features,
+}
+```
+
+Now the test binary can be compiled with all feature files embedded, and can be run independently of the feature files:
+
+```sh
+> go test -c ./test/integration/integration_test.go
+> mv integration.test /some/random/dir
+> cd /some/random/dir
+> ./integration.test
+```
+
+**NOTE:** `godog.Options.FS` is an `fs.FS`, so custom filesystem loaders can be used.
+
+## CLI Mode
+
+**NOTE:** The [`godog` CLI has been deprecated](https://github.com/cucumber/godog/discussions/478). It is recommended to use `go test` instead.
+
+Another way to use `godog` is to run it in CLI mode.
+
+In this mode, the `godog` CLI will use `go` under the hood to compile and run your test suite.
+
+**Godog** does not interfere with the standard **go test** command behavior. You can leverage both frameworks to functionally test your application while maintaining all test-related source code in **_test.go** files.
+
+**Godog** acts similarly to the **go test** command, using the go compiler and linker to produce a test executable. Godog contexts need to be exported the same way as **Test** functions for go tests. Note that if you use the **godog** command tool, it will use the `go` executable to locate the compiler and linker.
+
+### Install
+```
+go install github.com/cucumber/godog/cmd/godog@latest
+```
+Adding `@v0.12.0` will install v0.12.0 specifically instead of master.
+
+With `go` versions prior to 1.17, use `go get github.com/cucumber/godog/cmd/godog@v0.12.0`.
+Running within the `$GOPATH`, you would also need to set `GO111MODULE=on`, like this:
+```
+GO111MODULE=on go get github.com/cucumber/godog/cmd/godog@v0.12.0
+```
+
+### Configure common options for godog CLI
+
+There are no global options or configuration files.
Alias your common or project-based commands: `alias godog-wip="godog --format=progress --tags=@wip"`
+
+## Concurrency
+
+When concurrency is configured in options, godog will execute the scenarios concurrently, which is supported by all supplied formatters.
+
+In order to support concurrency well, you should reset the state and isolate each scenario. Scenarios should not share any state. It is suggested to run the suite concurrently in order to make sure there are no state corruption or race conditions in the application.
+
+It is also useful to randomize the order of scenario execution, which you can now do with the `--random` command option or the `godog.Options.Randomize` setting.
+
+### Building your own custom formatter
+A simple example can be [found here](/_examples/custom-formatter).
+
+## License
+**Godog** and **Gherkin** are licensed under the [MIT][license] and developed as a part of the [cucumber project][cucumber].
+
+[godoc]: https://pkg.go.dev/github.com/cucumber/godog "Documentation on godog"
+[golang]: https://golang.org/ "GO programming language"
+[behat]: http://docs.behat.org/ "Behavior driven development framework for PHP"
+[cucumber]: https://cucumber.io/ "Behavior driven development framework"
+[license]: https://en.wikipedia.org/wiki/MIT_License "The MIT license"
+[contributing guide]: https://github.com/cucumber/godog/blob/main/CONTRIBUTING.md
+[releasing guide]: https://github.com/cucumber/godog/blob/main/RELEASING.md
+[community Discord]: https://cucumber.io/community#discord
+
+
+
diff --git a/vendor/github.com/cucumber/godog/RELEASING.md b/vendor/github.com/cucumber/godog/RELEASING.md
new file mode 100644
index 000000000..cba243657
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/RELEASING.md
@@ -0,0 +1,67 @@
+# Releasing Guidelines for Cucumber Godog
+
+This document provides guidelines for releasing new versions of Cucumber Godog. Follow these steps to ensure a smooth and consistent release process.
+
+## Versioning
+
+Cucumber Godog follows [Semantic Versioning]. Version numbers are in the format `MAJOR.MINOR.PATCH`.
+
+### Current (for v0.MINOR.PATCH)
+
+- **MINOR**: Incompatible API changes.
+- **PATCH**: Backward-compatible new features and bug fixes.
+
+### After v1.X.X release
+
+- **MAJOR**: Incompatible API changes.
+- **MINOR**: Backward-compatible new features.
+- **PATCH**: Backward-compatible bug fixes.
+
+## Release Process
+
+1. **Update Changelog:**
+   - Open `CHANGELOG.md` and add an entry for the upcoming release, formatted according to the principles of [Keep A CHANGELOG].
+   - Include details about new features, enhancements, and bug fixes.
+
+2. **Run Tests:**
+   - Run the test suite to ensure all existing features are working as expected (see the note after this list).
+
+3. **Manual Testing for Backwards Compatibility:**
+   - Manually test the new release with external libraries that depend on Cucumber Godog.
+   - Look for any potential backwards compatibility issues, especially with widely-used libraries.
+   - Address any identified issues before proceeding.
+
+4. **Create Release on GitHub:**
+   - Go to the [Releases] page on GitHub.
+   - Click on "Draft a new release."
+   - The tag version should be set to the new tag `vMAJOR.MINOR.PATCH`.
+   - Title the release using the version number (e.g., "vMAJOR.MINOR.PATCH").
+   - Click 'Generate release notes'.
+
+5. **Publish Release:**
+   - Click "Publish release" to make the release public.
+
+6. **Announce the Release:**
+   - Make an announcement on relevant communication channels (e.g., [community Discord]) about the new release.
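+
+As a note on step 2: the repository's `Makefile` wires gofmt, staticcheck, `go vet`, and race-enabled tests behind a single target, so running the full suite is typically just:
+
+    make test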
+ +## Additional Considerations + +- **Documentation:** + - Update the project documentation on the [website], if applicable. + +- **Deprecation Notices:** + - If any features are deprecated, clearly document them in the release notes and provide guidance on migration. + +- **Compatibility:** + - Clearly state any compatibility requirements or changes in the release notes. + +- **Feedback:** + - Encourage users to provide feedback and report any issues with the new release. + +Following these guidelines, including manual testing with external libraries, will help ensure a thorough release process for Cucumber Godog, allowing detection and resolution of potential backwards compatibility issues before tagging the release. + +[community Discord]: https://cucumber.io/community#discord +[website]: https://cucumber.github.io/godog/ +[Releases]: https://github.com/cucumber/godog/releases +[Semantic Versioning]: http://semver.org +[Keep A CHANGELOG]: http://keepachangelog.com \ No newline at end of file diff --git a/vendor/github.com/cucumber/godog/codecov.yml b/vendor/github.com/cucumber/godog/codecov.yml new file mode 100644 index 000000000..1418fc73d --- /dev/null +++ b/vendor/github.com/cucumber/godog/codecov.yml @@ -0,0 +1,8 @@ +coverage: + status: + project: + default: + threshold: 0.5% + patch: + default: + threshold: 0.5% diff --git a/vendor/github.com/cucumber/godog/colors/ansi_others.go b/vendor/github.com/cucumber/godog/colors/ansi_others.go new file mode 100644 index 000000000..6a166079f --- /dev/null +++ b/vendor/github.com/cucumber/godog/colors/ansi_others.go @@ -0,0 +1,19 @@ +// Copyright 2014 shiena Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +//go:build !windows +// +build !windows + +package colors + +import "io" + +type ansiColorWriter struct { + w io.Writer + mode outputMode +} + +func (cw *ansiColorWriter) Write(p []byte) (int, error) { + return cw.w.Write(p) +} diff --git a/vendor/github.com/cucumber/godog/colors/ansi_windows.go b/vendor/github.com/cucumber/godog/colors/ansi_windows.go new file mode 100644 index 000000000..8a92c8223 --- /dev/null +++ b/vendor/github.com/cucumber/godog/colors/ansi_windows.go @@ -0,0 +1,418 @@ +// Copyright 2014 shiena Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +//go:build windows +// +build windows + +package colors + +import ( + "bytes" + "io" + "strings" + "syscall" + "unsafe" +) + +type csiState int + +const ( + outsideCsiCode csiState = iota + firstCsiCode + secondCsiCode +) + +type parseResult int + +const ( + noConsole parseResult = iota + changedColor + unknown +) + +type ansiColorWriter struct { + w io.Writer + mode outputMode + state csiState + paramStartBuf bytes.Buffer + paramBuf bytes.Buffer +} + +const ( + firstCsiChar byte = '\x1b' + secondeCsiChar byte = '[' + separatorChar byte = ';' + sgrCode byte = 'm' +) + +const ( + foregroundBlue = uint16(0x0001) + foregroundGreen = uint16(0x0002) + foregroundRed = uint16(0x0004) + foregroundIntensity = uint16(0x0008) + backgroundBlue = uint16(0x0010) + backgroundGreen = uint16(0x0020) + backgroundRed = uint16(0x0040) + backgroundIntensity = uint16(0x0080) + underscore = uint16(0x8000) + + foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity + backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity +) + +const ( + ansiReset = "0" + ansiIntensityOn = "1" + ansiIntensityOff = "21" + ansiUnderlineOn = "4" + ansiUnderlineOff = "24" + ansiBlinkOn = "5" + ansiBlinkOff = "25" + + ansiForegroundBlack = "30" + ansiForegroundRed = "31" + ansiForegroundGreen = "32" + ansiForegroundYellow = "33" + ansiForegroundBlue = "34" + ansiForegroundMagenta = "35" + ansiForegroundCyan = "36" + ansiForegroundWhite = "37" + ansiForegroundDefault = "39" + + ansiBackgroundBlack = "40" + ansiBackgroundRed = "41" + ansiBackgroundGreen = "42" + ansiBackgroundYellow = "43" + ansiBackgroundBlue = "44" + ansiBackgroundMagenta = "45" + ansiBackgroundCyan = "46" + ansiBackgroundWhite = "47" + ansiBackgroundDefault = "49" + + ansiLightForegroundGray = "90" + ansiLightForegroundRed = "91" + ansiLightForegroundGreen = "92" + ansiLightForegroundYellow = "93" + ansiLightForegroundBlue = "94" + ansiLightForegroundMagenta = "95" + ansiLightForegroundCyan = "96" + ansiLightForegroundWhite = "97" + + ansiLightBackgroundGray = "100" + ansiLightBackgroundRed = "101" + ansiLightBackgroundGreen = "102" + ansiLightBackgroundYellow = "103" + ansiLightBackgroundBlue = "104" + ansiLightBackgroundMagenta = "105" + ansiLightBackgroundCyan = "106" + ansiLightBackgroundWhite = "107" +) + +type drawType int + +const ( + foreground drawType = iota + background +) + +type winColor struct { + code uint16 + drawType drawType +} + +var colorMap = map[string]winColor{ + ansiForegroundBlack: {0, foreground}, + ansiForegroundRed: {foregroundRed, foreground}, + ansiForegroundGreen: {foregroundGreen, foreground}, + ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground}, + ansiForegroundBlue: {foregroundBlue, foreground}, + ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground}, + ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground}, + ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, + ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground}, + + ansiBackgroundBlack: {0, background}, + ansiBackgroundRed: {backgroundRed, background}, + ansiBackgroundGreen: {backgroundGreen, background}, + ansiBackgroundYellow: {backgroundRed | backgroundGreen, background}, + ansiBackgroundBlue: {backgroundBlue, background}, + ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background}, + ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background}, + ansiBackgroundWhite: {backgroundRed | 
backgroundGreen | backgroundBlue, background}, + ansiBackgroundDefault: {0, background}, + + ansiLightForegroundGray: {foregroundIntensity, foreground}, + ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground}, + ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground}, + ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground}, + ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground}, + ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground}, + ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground}, + ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground}, + + ansiLightBackgroundGray: {backgroundIntensity, background}, + ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background}, + ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background}, + ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background}, + ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background}, + ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background}, + ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background}, + ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background}, +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + defaultAttr *textAttributes +) + +func init() { + screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) + if screenInfo != nil { + colorMap[ansiForegroundDefault] = winColor{ + screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue), + foreground, + } + colorMap[ansiBackgroundDefault] = winColor{ + screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue), + background, + } + defaultAttr = convertTextAttr(screenInfo.WAttributes) + } +} + +type coord struct { + X, Y int16 +} + +type smallRect struct { + Left, Top, Right, Bottom int16 +} + +type consoleScreenBufferInfo struct { + DwSize coord + DwCursorPosition coord + WAttributes uint16 + SrWindow smallRect + DwMaximumWindowSize coord +} + +func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo { + var csbi consoleScreenBufferInfo + ret, _, _ := procGetConsoleScreenBufferInfo.Call( + hConsoleOutput, + uintptr(unsafe.Pointer(&csbi))) + if ret == 0 { + return nil + } + return &csbi +} + +func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool { + ret, _, _ := procSetConsoleTextAttribute.Call( + hConsoleOutput, + uintptr(wAttributes)) + return ret != 0 +} + +type textAttributes struct { + foregroundColor uint16 + backgroundColor uint16 + foregroundIntensity uint16 + backgroundIntensity uint16 + underscore uint16 + otherAttributes uint16 +} + +func convertTextAttr(winAttr uint16) *textAttributes { + fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue) + bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue) + fgIntensity := winAttr & foregroundIntensity + bgIntensity := winAttr & backgroundIntensity + underline := winAttr & underscore + otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore) + return 
&textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes} +} + +func convertWinAttr(textAttr *textAttributes) uint16 { + var winAttr uint16 + winAttr |= textAttr.foregroundColor + winAttr |= textAttr.backgroundColor + winAttr |= textAttr.foregroundIntensity + winAttr |= textAttr.backgroundIntensity + winAttr |= textAttr.underscore + winAttr |= textAttr.otherAttributes + return winAttr +} + +func changeColor(param []byte) parseResult { + screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout)) + if screenInfo == nil { + return noConsole + } + + winAttr := convertTextAttr(screenInfo.WAttributes) + strParam := string(param) + if len(strParam) <= 0 { + strParam = "0" + } + csiParam := strings.Split(strParam, string(separatorChar)) + for _, p := range csiParam { + c, ok := colorMap[p] + switch { + case !ok: + switch p { + case ansiReset: + winAttr.foregroundColor = defaultAttr.foregroundColor + winAttr.backgroundColor = defaultAttr.backgroundColor + winAttr.foregroundIntensity = defaultAttr.foregroundIntensity + winAttr.backgroundIntensity = defaultAttr.backgroundIntensity + winAttr.underscore = 0 + winAttr.otherAttributes = 0 + case ansiIntensityOn: + winAttr.foregroundIntensity = foregroundIntensity + case ansiIntensityOff: + winAttr.foregroundIntensity = 0 + case ansiUnderlineOn: + winAttr.underscore = underscore + case ansiUnderlineOff: + winAttr.underscore = 0 + case ansiBlinkOn: + winAttr.backgroundIntensity = backgroundIntensity + case ansiBlinkOff: + winAttr.backgroundIntensity = 0 + default: + // unknown code + } + case c.drawType == foreground: + winAttr.foregroundColor = c.code + case c.drawType == background: + winAttr.backgroundColor = c.code + } + } + winTextAttribute := convertWinAttr(winAttr) + setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute) + + return changedColor +} + +func parseEscapeSequence(command byte, param []byte) parseResult { + if defaultAttr == nil { + return noConsole + } + + switch command { + case sgrCode: + return changeColor(param) + default: + return unknown + } +} + +func (cw *ansiColorWriter) flushBuffer() (int, error) { + return cw.flushTo(cw.w) +} + +func (cw *ansiColorWriter) resetBuffer() (int, error) { + return cw.flushTo(nil) +} + +func (cw *ansiColorWriter) flushTo(w io.Writer) (int, error) { + var n1, n2 int + var err error + + startBytes := cw.paramStartBuf.Bytes() + cw.paramStartBuf.Reset() + if w != nil { + n1, err = cw.w.Write(startBytes) + if err != nil { + return n1, err + } + } else { + n1 = len(startBytes) + } + paramBytes := cw.paramBuf.Bytes() + cw.paramBuf.Reset() + if w != nil { + n2, err = cw.w.Write(paramBytes) + if err != nil { + return n1 + n2, err + } + } else { + n2 = len(paramBytes) + } + return n1 + n2, nil +} + +func isParameterChar(b byte) bool { + return ('0' <= b && b <= '9') || b == separatorChar +} + +func (cw *ansiColorWriter) Write(p []byte) (int, error) { + r, nw, first, last := 0, 0, 0, 0 + if cw.mode != discardNonColorEscSeq { + cw.state = outsideCsiCode + cw.resetBuffer() + } + + var err error + for i, ch := range p { + switch cw.state { + case outsideCsiCode: + if ch == firstCsiChar { + cw.paramStartBuf.WriteByte(ch) + cw.state = firstCsiCode + } + case firstCsiCode: + switch ch { + case firstCsiChar: + cw.paramStartBuf.WriteByte(ch) + break + case secondeCsiChar: + cw.paramStartBuf.WriteByte(ch) + cw.state = secondCsiCode + last = i - 1 + default: + cw.resetBuffer() + cw.state = outsideCsiCode + } + case secondCsiCode: + if isParameterChar(ch) { + 
cw.paramBuf.WriteByte(ch) + } else { + nw, err = cw.w.Write(p[first:last]) + r += nw + if err != nil { + return r, err + } + first = i + 1 + result := parseEscapeSequence(ch, cw.paramBuf.Bytes()) + if result == noConsole || (cw.mode == outputNonColorEscSeq && result == unknown) { + cw.paramBuf.WriteByte(ch) + nw, err := cw.flushBuffer() + if err != nil { + return r, err + } + r += nw + } else { + n, _ := cw.resetBuffer() + // Add one more to the size of the buffer for the last ch + r += n + 1 + } + + cw.state = outsideCsiCode + } + default: + cw.state = outsideCsiCode + } + } + + if cw.mode != discardNonColorEscSeq || cw.state == outsideCsiCode { + nw, err = cw.w.Write(p[first:]) + r += nw + } + + return r, err +} diff --git a/vendor/github.com/cucumber/godog/colors/colors.go b/vendor/github.com/cucumber/godog/colors/colors.go new file mode 100644 index 000000000..be7722e95 --- /dev/null +++ b/vendor/github.com/cucumber/godog/colors/colors.go @@ -0,0 +1,68 @@ +package colors + +import ( + "fmt" + "strings" +) + +const ansiEscape = "\x1b" + +// a color code type +type color int + +// some ansi colors +const ( + black color = iota + 30 + red + green + yellow + blue // unused + magenta // unused + cyan + white +) + +func colorize(s interface{}, c color) string { + return fmt.Sprintf("%s[%dm%v%s[0m", ansiEscape, c, s, ansiEscape) +} + +// ColorFunc is a helper type to create colorized strings. +type ColorFunc func(interface{}) string + +// Bold will accept a ColorFunc and return a new ColorFunc +// that will make the string bold. +func Bold(fn ColorFunc) ColorFunc { + return ColorFunc(func(input interface{}) string { + return strings.Replace(fn(input), ansiEscape+"[", ansiEscape+"[1;", 1) + }) +} + +// Green will accept an interface and return a colorized green string. +func Green(s interface{}) string { + return colorize(s, green) +} + +// Red will accept an interface and return a colorized red string. +func Red(s interface{}) string { + return colorize(s, red) +} + +// Cyan will accept an interface and return a colorized cyan string. +func Cyan(s interface{}) string { + return colorize(s, cyan) +} + +// Black will accept an interface and return a colorized black string. +func Black(s interface{}) string { + return colorize(s, black) +} + +// Yellow will accept an interface and return a colorized yellow string. +func Yellow(s interface{}) string { + return colorize(s, yellow) +} + +// White will accept an interface and return a colorized white string. +func White(s interface{}) string { + return colorize(s, white) +} diff --git a/vendor/github.com/cucumber/godog/colors/no_colors.go b/vendor/github.com/cucumber/godog/colors/no_colors.go new file mode 100644 index 000000000..2eeb80243 --- /dev/null +++ b/vendor/github.com/cucumber/godog/colors/no_colors.go @@ -0,0 +1,59 @@ +package colors + +import ( + "bytes" + "fmt" + "io" +) + +type noColors struct { + out io.Writer + lastbuf bytes.Buffer +} + +// Uncolored will accept and io.Writer and return a +// new io.Writer that won't include colors. 
+func Uncolored(w io.Writer) io.Writer { + return &noColors{out: w} +} + +func (w *noColors) Write(data []byte) (n int, err error) { + er := bytes.NewBuffer(data) +loop: + for { + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + return len(data) - w.lastbuf.Len(), nil +} diff --git a/vendor/github.com/cucumber/godog/colors/writer.go b/vendor/github.com/cucumber/godog/colors/writer.go new file mode 100644 index 000000000..469c7a5ed --- /dev/null +++ b/vendor/github.com/cucumber/godog/colors/writer.go @@ -0,0 +1,41 @@ +// Copyright 2014 shiena Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package colors + +import "io" + +type outputMode int + +// DiscardNonColorEscSeq supports the divided color escape sequence. +// But non-color escape sequence is not output. +// Please use the OutputNonColorEscSeq If you want to output a non-color +// escape sequences such as ncurses. However, it does not support the divided +// color escape sequence. +const ( + _ outputMode = iota + discardNonColorEscSeq + outputNonColorEscSeq // unused +) + +// Colored creates and initializes a new ansiColorWriter +// using io.Writer w as its initial contents. +// In the console of Windows, which change the foreground and background +// colors of the text by the escape sequence. +// In the console of other systems, which writes to w all text. +func Colored(w io.Writer) io.Writer { + return createModeAnsiColorWriter(w, discardNonColorEscSeq) +} + +// NewModeAnsiColorWriter create and initializes a new ansiColorWriter +// by specifying the outputMode. +func createModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer { + if _, ok := w.(*ansiColorWriter); !ok { + return &ansiColorWriter{ + w: w, + mode: mode, + } + } + return w +} diff --git a/vendor/github.com/cucumber/godog/flags.go b/vendor/github.com/cucumber/godog/flags.go new file mode 100644 index 000000000..45efbfec7 --- /dev/null +++ b/vendor/github.com/cucumber/godog/flags.go @@ -0,0 +1,255 @@ +package godog + +import ( + "flag" + "fmt" + "io" + "sort" + "strconv" + "strings" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/internal/utils" +) + +// repeats a space n times +var s = utils.S + +var descFeaturesArgument = "Optional feature(s) to run. Can be:\n" + + s(4) + "- dir " + colors.Yellow("(features/)") + "\n" + + s(4) + "- feature " + colors.Yellow("(*.feature)") + "\n" + + s(4) + "- scenario at specific line " + colors.Yellow("(*.feature:10)") + "\n" + + "If no feature paths are listed, suite tries " + colors.Yellow("features") + " path by default.\n" + + "Multiple comma-separated values can be provided.\n" + +var descConcurrencyOption = "Run the test suite with concurrency level:\n" + + s(4) + "- " + colors.Yellow(`= 1`) + ": supports all types of formats.\n" + + s(4) + "- " + colors.Yellow(`>= 2`) + ": only supports " + colors.Yellow("progress") + ". 
Note, that\n" + + s(4) + "your context needs to support parallel execution." + +var descTagsOption = "Filter scenarios by tags. Expression can be:\n" + + s(4) + "- " + colors.Yellow(`"@wip"`) + ": run all scenarios with wip tag\n" + + s(4) + "- " + colors.Yellow(`"~@wip"`) + ": exclude all scenarios with wip tag\n" + + s(4) + "- " + colors.Yellow(`"@wip && ~@new"`) + ": run wip scenarios, but exclude new\n" + + s(4) + "- " + colors.Yellow(`"@wip,@undone"`) + ": run wip or undone scenarios" + +var descRandomOption = "Randomly shuffle the scenario execution order.\n" + + "Specify SEED to reproduce the shuffling from a previous run.\n" + + s(4) + `e.g. ` + colors.Yellow(`--random`) + " or " + colors.Yellow(`--random=5738`) + +// FlagSet allows to manage flags by external suite runner +// builds flag.FlagSet with godog flags binded +// +// Deprecated: +func FlagSet(opt *Options) *flag.FlagSet { + set := flag.NewFlagSet("godog", flag.ExitOnError) + BindFlags("", set, opt) + set.Usage = usage(set, opt.Output) + return set +} + +// BindFlags binds godog flags to given flag set prefixed +// by given prefix, without overriding usage +func BindFlags(prefix string, set *flag.FlagSet, opt *Options) { + set.Usage = usage(set, set.Output()) + + descFormatOption := "How to format tests output. Built-in formats:\n" + + type fm struct { + name string + desc string + } + var fms []fm + for name, desc := range AvailableFormatters() { + fms = append(fms, fm{ + name: name, + desc: desc, + }) + } + sort.Slice(fms, func(i, j int) bool { + return fms[i].name < fms[j].name + }) + + for _, fm := range fms { + descFormatOption += s(4) + "- " + colors.Yellow(fm.name) + ": " + fm.desc + "\n" + } + + descFormatOption = strings.TrimSpace(descFormatOption) + + // override flag defaults if any corresponding properties were supplied on the incoming `opt` + defFormatOption := "pretty" + if opt.Format != "" { + defFormatOption = opt.Format + } + + defTagsOption := "" + if opt.Tags != "" { + defTagsOption = opt.Tags + } + + defConcurrencyOption := 1 + if opt.Concurrency != 0 { + defConcurrencyOption = opt.Concurrency + } + + defShowStepDefinitions := false + if opt.ShowStepDefinitions { + defShowStepDefinitions = opt.ShowStepDefinitions + } + + defStopOnFailure := false + if opt.StopOnFailure { + defStopOnFailure = opt.StopOnFailure + } + + defStrict := false + if opt.Strict { + defStrict = opt.Strict + } + + defNoColors := false + if opt.NoColors { + defNoColors = opt.NoColors + } + + set.StringVar(&opt.Format, prefix+"format", defFormatOption, descFormatOption) + set.StringVar(&opt.Format, prefix+"f", defFormatOption, descFormatOption) + set.StringVar(&opt.Tags, prefix+"tags", defTagsOption, descTagsOption) + set.StringVar(&opt.Tags, prefix+"t", defTagsOption, descTagsOption) + set.IntVar(&opt.Concurrency, prefix+"concurrency", defConcurrencyOption, descConcurrencyOption) + set.IntVar(&opt.Concurrency, prefix+"c", defConcurrencyOption, descConcurrencyOption) + set.BoolVar(&opt.ShowStepDefinitions, prefix+"definitions", defShowStepDefinitions, "Print all available step definitions.") + set.BoolVar(&opt.ShowStepDefinitions, prefix+"d", defShowStepDefinitions, "Print all available step definitions.") + set.BoolVar(&opt.StopOnFailure, prefix+"stop-on-failure", defStopOnFailure, "Stop processing on first failed scenario.") + set.BoolVar(&opt.Strict, prefix+"strict", defStrict, "Fail suite when there are pending or undefined or ambiguous steps.") + set.BoolVar(&opt.NoColors, prefix+"no-colors", defNoColors, "Disable ansi 
colors.") + set.Var(&randomSeed{&opt.Randomize}, prefix+"random", descRandomOption) + set.BoolVar(&opt.ShowHelp, "godog.help", false, "Show usage help.") + set.Func(prefix+"paths", descFeaturesArgument, func(paths string) error { + if paths != "" { + opt.Paths = strings.Split(paths, ",") + } + + return nil + }) +} + +type flagged struct { + short, long, descr, dflt string +} + +func (f *flagged) name() string { + var name string + switch { + case len(f.short) > 0 && len(f.long) > 0: + name = fmt.Sprintf("-%s, --%s", f.short, f.long) + case len(f.long) > 0: + name = fmt.Sprintf("--%s", f.long) + case len(f.short) > 0: + name = fmt.Sprintf("-%s", f.short) + } + + if f.long == "random" { + // `random` is special in that we will later assign it randomly + // if the user specifies `--random` without specifying one, + // so mask the "default" value here to avoid UI confusion about + // what the value will end up being. + name += "[=SEED]" + } else if f.dflt != "true" && f.dflt != "false" { + name += "=" + f.dflt + } + return name +} + +func usage(set *flag.FlagSet, w io.Writer) func() { + return func() { + var list []*flagged + var longest int + set.VisitAll(func(f *flag.Flag) { + var fl *flagged + for _, flg := range list { + if flg.descr == f.Usage { + fl = flg + break + } + } + if nil == fl { + fl = &flagged{ + dflt: f.DefValue, + descr: f.Usage, + } + list = append(list, fl) + } + if len(f.Name) > 2 { + fl.long = f.Name + } else { + fl.short = f.Name + } + }) + + for _, f := range list { + if len(f.name()) > longest { + longest = len(f.name()) + } + } + + // prints an option or argument with a description, or only description + opt := func(name, desc string) string { + var ret []string + lines := strings.Split(desc, "\n") + ret = append(ret, s(2)+colors.Green(name)+s(longest+2-len(name))+lines[0]) + if len(lines) > 1 { + for _, ln := range lines[1:] { + ret = append(ret, s(2)+s(longest+2)+ln) + } + } + return strings.Join(ret, "\n") + } + + // --- GENERAL --- + fmt.Fprintln(w, colors.Yellow("Usage:")) + fmt.Fprintf(w, s(2)+"go test [options]\n\n") + + // --- OPTIONS --- + fmt.Fprintln(w, colors.Yellow("Options:")) + for _, f := range list { + fmt.Fprintln(w, opt(f.name(), f.descr)) + } + fmt.Fprintln(w, "") + } +} + +// randomSeed implements `flag.Value`, see https://golang.org/pkg/flag/#Value +type randomSeed struct { + ref *int64 +} + +func (rs *randomSeed) Set(s string) error { + if s == "true" { + *rs.ref = makeRandomSeed() + return nil + } + + if s == "false" { + *rs.ref = 0 + return nil + } + + i, err := strconv.ParseInt(s, 10, 64) + *rs.ref = i + return err +} + +func (rs *randomSeed) String() string { + if rs.ref == nil { + return "0" + } + return strconv.FormatInt(*rs.ref, 10) +} + +// If a Value has an IsBoolFlag() bool method returning true, the command-line +// parser makes -name equivalent to -name=true rather than using the next +// command-line argument. +func (rs *randomSeed) IsBoolFlag() bool { + return *rs.ref == 0 +} diff --git a/vendor/github.com/cucumber/godog/flags_v0110.go b/vendor/github.com/cucumber/godog/flags_v0110.go new file mode 100644 index 000000000..eddf0279d --- /dev/null +++ b/vendor/github.com/cucumber/godog/flags_v0110.go @@ -0,0 +1,33 @@ +package godog + +import ( + "errors" + "flag" + "math/rand" + "time" + + "github.com/spf13/pflag" + + "github.com/cucumber/godog/internal/flags" +) + +// Choose randomly assigns a convenient pseudo-random seed value. +// The resulting seed will be between `1-99999` for later ease of specification. 
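+// A seed of 0 is reserved to mean "not randomized"; see randomSeed.Set and IsBoolFlag in flags.go.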
+func makeRandomSeed() int64 { + return rand.New(rand.NewSource(time.Now().UTC().UnixNano())).Int63n(99998) + 1 +} + +func flagSet(opt *Options) *pflag.FlagSet { + set := pflag.NewFlagSet("godog", pflag.ExitOnError) + flags.BindRunCmdFlags("", set, opt) + pflag.ErrHelp = errors.New("godog: help requested") + return set +} + +// BindCommandLineFlags binds godog flags to given flag set prefixed +// by given prefix, without overriding usage +func BindCommandLineFlags(prefix string, opts *Options) { + flagSet := pflag.CommandLine + flags.BindRunCmdFlags(prefix, flagSet, opts) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) +} diff --git a/vendor/github.com/cucumber/godog/fmt.go b/vendor/github.com/cucumber/godog/fmt.go new file mode 100644 index 000000000..f30f9f895 --- /dev/null +++ b/vendor/github.com/cucumber/godog/fmt.go @@ -0,0 +1,124 @@ +package godog + +import ( + "fmt" + "io" + "strings" + "unicode/utf8" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/formatters" + internal_fmt "github.com/cucumber/godog/internal/formatters" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/storage" +) + +// FindFmt searches available formatters registered +// and returns FormaterFunc matched by given +// format name or nil otherwise +func FindFmt(name string) FormatterFunc { + return formatters.FindFmt(name) +} + +// Format registers a feature suite output +// formatter by given name, description and +// FormatterFunc constructor function, to initialize +// formatter with the output recorder. +func Format(name, description string, f FormatterFunc) { + formatters.Format(name, description, f) +} + +// AvailableFormatters gives a map of all +// formatters registered with their name as key +// and description as value +func AvailableFormatters() map[string]string { + return formatters.AvailableFormatters() +} + +// Formatter is an interface for feature runner +// output summary presentation. +// +// New formatters may be created to represent +// suite results in different ways. These new +// formatters needs to be registered with a +// godog.Format function call +type Formatter = formatters.Formatter + +type storageFormatter interface { + SetStorage(*storage.Storage) +} + +// FormatterFunc builds a formatter with given +// suite name and io.Writer to record output +type FormatterFunc = formatters.FormatterFunc + +func printStepDefinitions(steps []*models.StepDefinition, w io.Writer) { + var longest int + for _, def := range steps { + n := utf8.RuneCountInString(def.Expr.String()) + if longest < n { + longest = n + } + } + + for _, def := range steps { + n := utf8.RuneCountInString(def.Expr.String()) + location := internal_fmt.DefinitionID(def) + spaces := strings.Repeat(" ", longest-n) + fmt.Fprintln(w, + colors.Yellow(def.Expr.String())+spaces, + colors.Bold(colors.Black)("# "+location)) + } + + if len(steps) == 0 { + fmt.Fprintln(w, "there were no contexts registered, could not find any step definition..") + } +} + +// NewBaseFmt creates a new base formatter. +func NewBaseFmt(suite string, out io.Writer) *BaseFmt { + return internal_fmt.NewBase(suite, out) +} + +// NewProgressFmt creates a new progress formatter. +func NewProgressFmt(suite string, out io.Writer) *ProgressFmt { + return internal_fmt.NewProgress(suite, out) +} + +// NewPrettyFmt creates a new pretty formatter. +func NewPrettyFmt(suite string, out io.Writer) *PrettyFmt { + return &PrettyFmt{Base: NewBaseFmt(suite, out)} +} + +// NewEventsFmt creates a new event streaming formatter. 
+func NewEventsFmt(suite string, out io.Writer) *EventsFmt { + return &EventsFmt{Base: NewBaseFmt(suite, out)} +} + +// NewCukeFmt creates a new Cucumber JSON formatter. +func NewCukeFmt(suite string, out io.Writer) *CukeFmt { + return &CukeFmt{Base: NewBaseFmt(suite, out)} +} + +// NewJUnitFmt creates a new JUnit formatter. +func NewJUnitFmt(suite string, out io.Writer) *JUnitFmt { + return &JUnitFmt{Base: NewBaseFmt(suite, out)} +} + +// BaseFmt exports Base formatter. +type BaseFmt = internal_fmt.Base + +// ProgressFmt exports Progress formatter. +type ProgressFmt = internal_fmt.Progress + +// PrettyFmt exports Pretty formatter. +type PrettyFmt = internal_fmt.Pretty + +// EventsFmt exports Events formatter. +type EventsFmt = internal_fmt.Events + +// CukeFmt exports Cucumber JSON formatter. +type CukeFmt = internal_fmt.Cuke + +// JUnitFmt exports JUnit formatter. +type JUnitFmt = internal_fmt.JUnit diff --git a/vendor/github.com/cucumber/godog/formatters/fmt.go b/vendor/github.com/cucumber/godog/formatters/fmt.go new file mode 100644 index 000000000..973cf11b8 --- /dev/null +++ b/vendor/github.com/cucumber/godog/formatters/fmt.go @@ -0,0 +1,108 @@ +package formatters + +import ( + "io" + "regexp" + + messages "github.com/cucumber/messages/go/v21" +) + +type registeredFormatter struct { + name string + description string + fmt FormatterFunc +} + +var registeredFormatters []*registeredFormatter + +// FindFmt searches available formatters registered +// and returns FormaterFunc matched by given +// format name or nil otherwise +func FindFmt(name string) FormatterFunc { + for _, el := range registeredFormatters { + if el.name == name { + return el.fmt + } + } + + return nil +} + +// Format registers a feature suite output +// formatter by given name, description and +// FormatterFunc constructor function, to initialize +// formatter with the output recorder. +func Format(name, description string, f FormatterFunc) { + registeredFormatters = append(registeredFormatters, ®isteredFormatter{ + name: name, + fmt: f, + description: description, + }) +} + +// AvailableFormatters gives a map of all +// formatters registered with their name as key +// and description as value +func AvailableFormatters() map[string]string { + fmts := make(map[string]string, len(registeredFormatters)) + + for _, f := range registeredFormatters { + fmts[f.name] = f.description + } + + return fmts +} + +// Formatter is an interface for feature runner +// output summary presentation. +// +// New formatters may be created to represent +// suite results in different ways. These new +// formatters needs to be registered with a +// godog.Format function call +type Formatter interface { + TestRunStarted() + Feature(*messages.GherkinDocument, string, []byte) + Pickle(*messages.Pickle) + Defined(*messages.Pickle, *messages.PickleStep, *StepDefinition) + Failed(*messages.Pickle, *messages.PickleStep, *StepDefinition, error) + Passed(*messages.Pickle, *messages.PickleStep, *StepDefinition) + Skipped(*messages.Pickle, *messages.PickleStep, *StepDefinition) + Undefined(*messages.Pickle, *messages.PickleStep, *StepDefinition) + Pending(*messages.Pickle, *messages.PickleStep, *StepDefinition) + Ambiguous(*messages.Pickle, *messages.PickleStep, *StepDefinition, error) + Summary() +} + +// FlushFormatter is a `Formatter` but can be flushed. 
+type FlushFormatter interface {
+	Formatter
+	Flush()
+}
+
+// FormatterFunc builds a formatter with given
+// suite name and io.Writer to record output
+type FormatterFunc func(string, io.Writer) Formatter
+
+// StepDefinition is a registered step definition.
+// It contains a StepHandler and a regexp which
+// is used to match a step, and the Args which
+// were matched by the last executed step.
+//
+// This structure is passed to the formatter
+// when a step is matched and has either failed
+// or succeeded.
+type StepDefinition struct {
+	Expr    *regexp.Regexp
+	Handler interface{}
+	Keyword Keyword
+}
+
+type Keyword int64
+
+const (
+	Given Keyword = iota
+	When
+	Then
+	None
+)
diff --git a/vendor/github.com/cucumber/godog/godog.go b/vendor/github.com/cucumber/godog/godog.go
new file mode 100644
index 000000000..dda501471
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/godog.go
@@ -0,0 +1,43 @@
+/*
+Package godog is the official Cucumber BDD framework for Golang. It merges specification
+and test documentation into one cohesive whole.
+
+Godog does not interfere with the standard "go test" command or its behavior.
+You can leverage both frameworks to functionally test your application while
+maintaining all test-related source code in *_test.go files.
+
+Godog acts similarly to the go test command. It uses the go
+compiler and linker in order to produce a test executable. Godog
+contexts need to be exported the same way as Test functions for go test.
+
+For example, imagine you're about to create the famous UNIX ls command.
+Before you begin, you describe how the feature should work; see the example below.
+
+Example:
+
+	Feature: ls
+	  In order to see the directory structure
+	  As a UNIX user
+	  I need to be able to list the current directory's contents
+
+	  Scenario:
+	    Given I am in a directory "test"
+	    And I have a file named "foo"
+	    And I have a file named "bar"
+	    When I run ls
+	    Then I should get output:
+	      """
+	      bar
+	      foo
+	      """
+
+Now, wouldn't it be cool if something could read this sentence and use it to actually
+run a test against the ls command? Hey, that's exactly what this package does!
+As you'll see, Godog is easy to learn, quick to use, and will put the fun back into tests.
+
+Godog was inspired by Behat and Cucumber; the above description is taken from their documentation.
+*/ +package godog + +// Version of package - based on Semantic Versioning 2.0.0 http://semver.org/ +var Version = "v0.0.0-dev" diff --git a/vendor/github.com/cucumber/godog/internal/builder/ast.go b/vendor/github.com/cucumber/godog/internal/builder/ast.go new file mode 100644 index 000000000..c4f82407c --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/builder/ast.go @@ -0,0 +1,31 @@ +package builder + +import "go/ast" + +func astContexts(f *ast.File, selectName string) []string { + var contexts []string + for _, d := range f.Decls { + switch fun := d.(type) { + case *ast.FuncDecl: + for _, param := range fun.Type.Params.List { + switch expr := param.Type.(type) { + case *ast.StarExpr: + switch x := expr.X.(type) { + case *ast.Ident: + if x.Name == selectName { + contexts = append(contexts, fun.Name.Name) + } + case *ast.SelectorExpr: + switch t := x.X.(type) { + case *ast.Ident: + if t.Name == "godog" && x.Sel.Name == selectName { + contexts = append(contexts, fun.Name.Name) + } + } + } + } + } + } + } + return contexts +} diff --git a/vendor/github.com/cucumber/godog/internal/builder/builder.go b/vendor/github.com/cucumber/godog/internal/builder/builder.go new file mode 100644 index 000000000..4cd4928f5 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/builder/builder.go @@ -0,0 +1,490 @@ +package builder + +import ( + "bytes" + "encoding/json" + "fmt" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "text/template" + "time" + "unicode" +) + +var ( + tooldir = findToolDir() + compiler = filepath.Join(tooldir, "compile") + linker = filepath.Join(tooldir, "link") + gopaths = filepath.SplitList(build.Default.GOPATH) + godogImportPath = "github.com/cucumber/godog" + + // godep + runnerTemplate = template.Must(template.New("testmain").Parse(`package main + +import ( + "github.com/cucumber/godog" + {{if or .TestSuiteContexts .ScenarioContexts}}_test "{{.ImportPath}}"{{end}} + {{if or .XTestSuiteContexts .XScenarioContexts}}_xtest "{{.ImportPath}}_test"{{end}} + {{if or .XTestSuiteContexts .XScenarioContexts}}"testing/internal/testdeps"{{end}} + "os" +) + +{{if or .XTestSuiteContexts .XScenarioContexts}} +func init() { + testdeps.ImportPath = "{{.ImportPath}}" +} +{{end}} + +func main() { + status := godog.TestSuite{ + Name: "{{ .Name }}", + TestSuiteInitializer: func (ctx *godog.TestSuiteContext) { + os.Setenv("GODOG_TESTED_PACKAGE", "{{.ImportPath}}") + {{range .TestSuiteContexts}} + _test.{{ . }}(ctx) + {{end}} + {{range .XTestSuiteContexts}} + _xtest.{{ . }}(ctx) + {{end}} + }, + ScenarioInitializer: func (ctx *godog.ScenarioContext) { + {{range .ScenarioContexts}} + _test.{{ . }}(ctx) + {{end}} + {{range .XScenarioContexts}} + _xtest.{{ . }}(ctx) + {{end}} + }, + }.Run() + + os.Exit(status) +}`)) + + // temp file for import + tempFileTemplate = template.Must(template.New("temp").Parse(`package {{.Name}} + +import "github.com/cucumber/godog" + +var _ = godog.Version +`)) +) + +// Build creates a test package like go test command at given target path. +// If there are no go files in tested directory, then +// it simply builds a godog executable to scan features. +// +// If there are go test files, it first builds a test +// package with standard go test command. +// +// Finally it generates godog suite executable which +// registers exported godog contexts from the test files +// of tested package. 
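+// Note: this builder backs the (deprecated) godog CLI mode; suites run via "go test" do not use it.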
+//
+// Returns an error if the build fails; on success the suite executable is written to bin.
+func Build(bin string) error {
+	abs, err := filepath.Abs(".")
+	if err != nil {
+		return err
+	}
+
+	// we allow the package to be nil, when godog is run in a
+	// directory that only contains feature files
+	pkg := importPackage(abs)
+	src, err := buildTestMain(pkg)
+	if err != nil {
+		return err
+	}
+
+	// may need to produce temp file for godog dependency
+	srcTemp, err := buildTempFile(pkg)
+	if err != nil {
+		return err
+	}
+
+	if srcTemp != nil {
+		// @TODO: in case of modules we cannot build it ourselves, we need to have this hacky option
+		pathTemp := filepath.Join(abs, "godog_dependency_file_test.go")
+		err = ioutil.WriteFile(pathTemp, srcTemp, 0644)
+		if err != nil {
+			return err
+		}
+		defer os.Remove(pathTemp)
+	}
+
+	workdir := ""
+	testdir := workdir
+
+	// build and compile the tested package.
+	// the generated test executable will be removed,
+	// since we do not need it for the godog suite.
+	// we also print back the temp WORK directory
+	// go has built. We will reuse it for our suite workdir.
+	temp := fmt.Sprintf(filepath.Join("%s", "temp-%d.test"), os.TempDir(), time.Now().UnixNano())
+	if os.Getenv("GO111MODULE") != "off" {
+		modTidyOutput, err := exec.Command("go", "mod", "tidy").CombinedOutput()
+		if err != nil {
+			return fmt.Errorf("failed to tidy modules in tested package: %s, reason: %v, output: %s", abs, err, string(modTidyOutput))
+		}
+	}
+	testOutput, err := exec.Command("go", "test", "-c", "-work", "-o", temp).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("failed to compile tested package: %s, reason: %v, output: %s", abs, err, string(testOutput))
+	}
+	defer os.Remove(temp)
+
+	// extract the go-build temporary directory as our workdir
+	linesOut := strings.Split(strings.TrimSpace(string(testOutput)), "\n")
+	// it may have some compilation warnings in the output, but these are not
+	// considered to be errors, since the command exit status is 0
+	for _, ln := range linesOut {
+		if !strings.HasPrefix(ln, "WORK=") {
+			continue
+		}
+		workdir = strings.Replace(ln, "WORK=", "", 1)
+		break
+	}
+
+	if strings.Contains(string(testOutput), "[no test files]") {
+		return fmt.Errorf("incorrect project structure: no test files found")
+	}
+
+	// we may not locate it in the output
+	if workdir == testdir {
+		return fmt.Errorf("expected WORK dir path to be present in output: %s", string(testOutput))
+	}
+
+	// check whether the workdir exists
+	stats, err := os.Stat(workdir)
+	if os.IsNotExist(err) {
+		return fmt.Errorf("expected WORK dir: %s to be available", workdir)
+	}
+
+	if !stats.IsDir() {
+		return fmt.Errorf("expected WORK dir: %s to be a directory", workdir)
+	}
+	testdir = filepath.Join(workdir, "b001")
+	defer os.RemoveAll(workdir)
+
+	// replace the _testmain.go file with our own
+	testmain := filepath.Join(testdir, "_testmain.go")
+	err = ioutil.WriteFile(testmain, src, 0644)
+	if err != nil {
+		return err
+	}
+
+	// the godog package may be vendored and may need an importmap
+	vendored := maybeVendoredGodog()
+
+	// compile the godog testmain package archive;
+	// we do not depend on CGO so a lot of checks are not necessary
+	linkerCfg := filepath.Join(testdir, "importcfg.link")
+	compilerCfg := linkerCfg
+
+	if vendored != nil {
+		data, err := ioutil.ReadFile(linkerCfg)
+		if err != nil {
+			return err
+		}
+
+		data = append(data, []byte(fmt.Sprintf("importmap %s=%s\n", godogImportPath, vendored.ImportPath))...)
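+		// the line appended above looks like, e.g. (illustrative module path):
+		//   importmap github.com/cucumber/godog=myapp/vendor/github.com/cucumber/godog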
+ compilerCfg = filepath.Join(testdir, "importcfg") + + err = ioutil.WriteFile(compilerCfg, data, 0644) + if err != nil { + return err + } + } + + testMainPkgOut := filepath.Join(testdir, "main.a") + args := []string{ + "-o", testMainPkgOut, + "-importcfg", compilerCfg, + "-p", "main", + "-complete", + } + + if err := filterImportCfg(compilerCfg); err != nil { + return err + } + + args = append(args, "-pack", testmain) + cmd := exec.Command(compiler, args...) + cmd.Env = os.Environ() + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to compile testmain package: %v - output: %s", err, string(out)) + } + + // link test suite executable + args = []string{ + "-o", bin, + "-importcfg", linkerCfg, + "-buildmode=exe", + } + args = append(args, testMainPkgOut) + cmd = exec.Command(linker, args...) + cmd.Env = os.Environ() + + out, err = cmd.CombinedOutput() + if err != nil { + msg := `failed to link test executable: + reason: %s + command: %s` + return fmt.Errorf(msg, string(out), linker+" '"+strings.Join(args, "' '")+"'") + } + + return nil +} + +// filterImportCfg strips unsupported lines from imports configuration. +func filterImportCfg(path string) error { + orig, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read %s: %w", path, err) + } + + res := "" + for _, l := range strings.Split(string(orig), "\n") { + if !strings.HasPrefix(l, "modinfo") { + res += l + "\n" + } + } + err = ioutil.WriteFile(path, []byte(res), 0600) + if err != nil { + return fmt.Errorf("failed to write %s: %w", path, err) + } + + return nil +} + +func maybeVendoredGodog() *build.Package { + dir, err := filepath.Abs(".") + if err != nil { + return nil + } + + for _, gopath := range gopaths { + gopath = filepath.Join(gopath, "src") + for strings.HasPrefix(dir, gopath) && dir != gopath { + pkg, err := build.ImportDir(filepath.Join(dir, "vendor", godogImportPath), 0) + if err != nil { + dir = filepath.Dir(dir) + continue + } + return pkg + } + } + return nil +} + +func normaliseLocalImportPath(dir string) string { + return path.Join("_", strings.Map(makeImportValid, filepath.ToSlash(dir))) +} +func importPackage(dir string) *build.Package { + pkg, _ := build.ImportDir(dir, 0) + + // normalize import path for local import packages + // taken from go source code + // see: https://github.com/golang/go/blob/go1.7rc5/src/cmd/go/pkg.go#L279 + if pkg != nil && pkg.ImportPath == "." { + pkg.ImportPath = normaliseLocalImportPath(dir) + } + + return pkg +} + +// from go src +func makeImportValid(r rune) rune { + // Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport. + const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" + if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { + return '_' + } + return r +} + +// build temporary file content if godog +// package is not present in currently tested package +func buildTempFile(pkg *build.Package) ([]byte, error) { + shouldBuild := true + var name string + if pkg != nil { + name = pkg.Name + all := pkg.Imports + all = append(all, pkg.TestImports...) + all = append(all, pkg.XTestImports...) 
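+		// "all" now holds the imports of the package's source files,
+		// test files and external test files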
+		for _, imp := range all {
+			if imp == godogImportPath {
+				shouldBuild = false
+				break
+			}
+		}
+
+		// maybe we are testing the godog package on its own
+		if name == "godog" {
+			if parseImport(pkg.ImportPath, pkg.Root) == godogImportPath {
+				shouldBuild = false
+			}
+		}
+	}
+
+	if name == "" {
+		name = "main"
+	}
+
+	if !shouldBuild {
+		return nil, nil
+	}
+
+	data := struct{ Name string }{name}
+	var buf bytes.Buffer
+	if err := tempFileTemplate.Execute(&buf, data); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// buildTestMain, if the given package is valid,
+// scans its test files for contexts
+// and produces the testmain source code.
+func buildTestMain(pkg *build.Package) ([]byte, error) {
+	var (
+		ctxs, xctxs contexts
+		err         error
+		name        = "main"
+		importPath  string
+	)
+
+	if nil != pkg {
+		if ctxs, err = processPackageTestFiles(pkg.TestGoFiles); err != nil {
+			return nil, err
+		}
+
+		if xctxs, err = processPackageTestFiles(pkg.XTestGoFiles); err != nil {
+			return nil, err
+		}
+
+		importPath = parseImport(pkg.ImportPath, pkg.Root)
+		name = pkg.Name
+	} else {
+		name = "main"
+	}
+
+	data := struct {
+		Name               string
+		ImportPath         string
+		TestSuiteContexts  []string
+		ScenarioContexts   []string
+		XTestSuiteContexts []string
+		XScenarioContexts  []string
+	}{
+		Name:               name,
+		ImportPath:         importPath,
+		TestSuiteContexts:  ctxs.testSuiteCtxs,
+		ScenarioContexts:   ctxs.scenarioCtxs,
+		XTestSuiteContexts: xctxs.testSuiteCtxs,
+		XScenarioContexts:  xctxs.scenarioCtxs,
+	}
+
+	var buf bytes.Buffer
+	if err = runnerTemplate.Execute(&buf, data); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// parseImport parses the import path to deal with go modules.
+func parseImport(rawPath, rootPath string) string {
+	// with go > 1.11 and go modules enabled outside of the GOPATH,
+	// the import path begins with an underscore and the GOPATH is unknown on build.
+	if rootPath != "" {
+		// go < 1.11 or it's a module inside the GOPATH
+		return rawPath
+	}
+	// for module support, query the module import path
+	cmd := exec.Command("go", "list", "-m", "-json")
+	out, err := cmd.StdoutPipe()
+	if err != nil {
+		// Unable to read stdout
+		return rawPath
+	}
+	if cmd.Start() != nil {
+		// Not using modules
+		return rawPath
+	}
+	var mod struct {
+		Dir  string `json:"Dir"`
+		Path string `json:"Path"`
+	}
+	if json.NewDecoder(out).Decode(&mod) != nil {
+		// Unexpected result
+		return rawPath
+	}
+	if cmd.Wait() != nil {
+		return rawPath
+	}
+	// Concatenates the module path with the current sub-folders if needed
+	return mod.Path + filepath.ToSlash(strings.TrimPrefix(rawPath, normaliseLocalImportPath(mod.Dir)))
+}
+
+type contexts struct {
+	deprecatedFeatureCtxs []string
+	testSuiteCtxs         []string
+	scenarioCtxs          []string
+}
+
+func (ctxs contexts) validate() error {
+	var allCtxs []string
+	allCtxs = append(allCtxs, ctxs.deprecatedFeatureCtxs...)
+	allCtxs = append(allCtxs, ctxs.testSuiteCtxs...)
+	allCtxs = append(allCtxs, ctxs.scenarioCtxs...)
+
+	var failed []string
+	for _, ctx := range allCtxs {
+		runes := []rune(ctx)
+		if unicode.IsLower(runes[0]) {
+			expected := append([]rune{unicode.ToUpper(runes[0])}, runes[1:]...)
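+			// suggest the exported name, e.g. "myCtx - should be: MyCtx" (illustrative)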
+			failed = append(failed, fmt.Sprintf("%s - should be: %s", ctx, string(expected)))
+		}
+	}
+
+	if len(failed) > 0 {
+		return fmt.Errorf("godog contexts must be exported:\n\t%s", strings.Join(failed, "\n\t"))
+	}
+
+	return nil
+}
+
+// processPackageTestFiles runs through the AST of each test
+// file pack and looks for godog suite contexts to register
+// on run.
+func processPackageTestFiles(packs ...[]string) (ctxs contexts, _ error) {
+	fset := token.NewFileSet()
+	for _, pack := range packs {
+		for _, testFile := range pack {
+			node, err := parser.ParseFile(fset, testFile, nil, 0)
+			if err != nil {
+				return ctxs, err
+			}
+
+			ctxs.testSuiteCtxs = append(ctxs.testSuiteCtxs, astContexts(node, "TestSuiteContext")...)
+			ctxs.scenarioCtxs = append(ctxs.scenarioCtxs, astContexts(node, "ScenarioContext")...)
+		}
+	}
+
+	return ctxs, ctxs.validate()
+}
+
+func findToolDir() string {
+	// prefer the tool dir reported by the go command and fall back to build.ToolDir
+	if out, err := exec.Command("go", "env", "GOTOOLDIR").Output(); err == nil {
+		return filepath.Clean(strings.TrimSpace(string(out)))
+	}
+	return filepath.Clean(build.ToolDir)
+}
diff --git a/vendor/github.com/cucumber/godog/internal/flags/flags.go b/vendor/github.com/cucumber/godog/internal/flags/flags.go
new file mode 100644
index 000000000..1bd67e591
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/flags/flags.go
@@ -0,0 +1,49 @@
+package flags
+
+import (
+	"github.com/spf13/pflag"
+)
+
+// BindRunCmdFlags is an internal func to bind run subcommand flags.
+func BindRunCmdFlags(prefix string, flagSet *pflag.FlagSet, opts *Options) {
+	if opts.Concurrency == 0 {
+		opts.Concurrency = 1
+	}
+
+	if opts.Format == "" {
+		opts.Format = "pretty"
+	}
+
+	flagSet.BoolVar(&opts.NoColors, prefix+"no-colors", opts.NoColors, "disable ansi colors")
+	flagSet.IntVarP(&opts.Concurrency, prefix+"concurrency", "c", opts.Concurrency, "run the test suite with concurrency")
+	flagSet.StringVarP(&opts.Tags, prefix+"tags", "t", opts.Tags, `filter scenarios by tags, expression can be:
+	"@wip"          run all scenarios with wip tag
+	"~@wip"         exclude all scenarios with wip tag
+	"@wip && ~@new" run wip scenarios, but exclude new
+	"@wip,@undone"  run wip or undone scenarios`)
+	flagSet.StringVarP(&opts.Format, prefix+"format", "f", opts.Format, `will write a report according to the selected formatter
+
+usage:
+	-f <formatter>
+	will use the formatter and write the report on stdout
+	-f <formatter>:<file_path>
+	will use the formatter and write the report to the file path
+
+built-in formatters are:
+	progress	prints a character per step
+	cucumber	produces a Cucumber JSON report
+	events		produces a JSON event stream, based on spec: 0.1.0
+	junit		produces a JUnit compatible XML report
+	pretty		prints every feature with runtime statuses
+	`)
+
+	flagSet.BoolVarP(&opts.ShowStepDefinitions, prefix+"definitions", "d", opts.ShowStepDefinitions, "print all available step definitions")
+	flagSet.BoolVar(&opts.StopOnFailure, prefix+"stop-on-failure", opts.StopOnFailure, "stop processing on first failed scenario")
+	flagSet.BoolVar(&opts.Strict, prefix+"strict", opts.Strict, "fail suite when there are pending, undefined or ambiguous steps")
+
+	flagSet.Int64Var(&opts.Randomize, prefix+"random", opts.Randomize, `randomly shuffle the scenario execution order
+--random
+specify SEED to reproduce the shuffling from a previous run
+--random=5738`)
+	flagSet.Lookup(prefix + "random").NoOptDefVal = "-1"
+}
diff --git a/vendor/github.com/cucumber/godog/internal/flags/options.go b/vendor/github.com/cucumber/godog/internal/flags/options.go
new file mode 100644
index 000000000..40acea652
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/flags/options.go
@@ -0,0 +1,88 @@
+package flags
+
+import (
+	"context"
+	"io"
+	"io/fs"
+	"testing"
+)
+
+// Options are suite run options;
+// flags are mapped to these options.
+//
+// It can also be used together with godog.RunWithOptions
+// to run the test suite from go source directly.
+//
+// See the flags for more details.
+type Options struct {
+	// Print step definitions found and exit
+	ShowStepDefinitions bool
+
+	// Randomize, if not `0`, will be used to run scenarios in a random order.
+	//
+	// Randomizing scenario order is especially helpful for detecting
+	// situations where you have state leaking between scenarios, which can
+	// cause flickering or fragile tests.
+	//
+	// The default value of `0` means "do not randomize".
+	//
+	// The magic value of `-1` means "pick a random seed for me", and godog will
+	// assign a seed on its own during the `RunWithOptions` phase, similar to if
+	// you specified `--random` on the command line.
+	//
+	// Any other value will be used as the random seed for shuffling. Re-using the
+	// same seed will allow you to reproduce the shuffle order of a previous run
+	// to isolate an error condition.
+	Randomize int64
+
+	// Stops on the first failure
+	StopOnFailure bool
+
+	// Fail suite when there are pending, undefined or ambiguous steps
+	Strict bool
+
+	// Forces ansi color stripping
+	NoColors bool
+
+	// Various filters for scenarios parsed
+	// from feature files
+	Tags string
+
+	// Dialect to be used to parse feature files. If not set, defaults to "en".
+	Dialect string
+
+	// The formatter name
+	Format string
+
+	// Concurrency rate; not all formatters accept this
+	Concurrency int
+
+	// All feature file paths
+	Paths []string
+
+	// Where it should print formatter output
+	Output io.Writer
+
+	// DefaultContext is used as the initial context instead of context.Background().
+	DefaultContext context.Context
+
+	// TestingT runs scenarios as subtests.
+	TestingT *testing.T
+
+	// FeatureContents allows passing in each feature manually
+	// where the contents of each feature is stored as a byte slice
+	// in a map entry
+	FeatureContents []Feature
+
+	// FS allows passing in an `fs.FS` to read features from, such as an `embed.FS`
+	// or os.DirFS(string).
+	FS fs.FS
+
+	// ShowHelp enables suite to show CLI flags usage help and exit.
+ ShowHelp bool +} + +type Feature struct { + Name string + Contents []byte +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt.go new file mode 100644 index 000000000..5530c0c24 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt.go @@ -0,0 +1,104 @@ +package formatters + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/utils" +) + +var ( + red = colors.Red + redb = colors.Bold(colors.Red) + green = colors.Green + blackb = colors.Bold(colors.Black) + yellow = colors.Yellow + cyan = colors.Cyan + cyanb = colors.Bold(colors.Cyan) + whiteb = colors.Bold(colors.White) +) + +// repeats a space n times +var s = utils.S + +var ( + passed = models.Passed + failed = models.Failed + skipped = models.Skipped + undefined = models.Undefined + pending = models.Pending + ambiguous = models.Ambiguous +) + +type sortFeaturesByName []*models.Feature + +func (s sortFeaturesByName) Len() int { return len(s) } +func (s sortFeaturesByName) Less(i, j int) bool { return s[i].Feature.Name < s[j].Feature.Name } +func (s sortFeaturesByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type sortPicklesByID []*messages.Pickle + +func (s sortPicklesByID) Len() int { return len(s) } +func (s sortPicklesByID) Less(i, j int) bool { + iID := mustConvertStringToInt(s[i].Id) + jID := mustConvertStringToInt(s[j].Id) + return iID < jID +} +func (s sortPicklesByID) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type sortPickleStepResultsByPickleStepID []models.PickleStepResult + +func (s sortPickleStepResultsByPickleStepID) Len() int { return len(s) } +func (s sortPickleStepResultsByPickleStepID) Less(i, j int) bool { + iID := mustConvertStringToInt(s[i].PickleStepID) + jID := mustConvertStringToInt(s[j].PickleStepID) + return iID < jID +} +func (s sortPickleStepResultsByPickleStepID) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func mustConvertStringToInt(s string) int { + i, err := strconv.Atoi(s) + if err != nil { + panic(err) + } + + return i +} + +// DefinitionID ... 
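+// It formats a step definition as "file.go:line -> funcName",
+// e.g. (illustrative) "fmt_base_test.go:12 -> iEatApples".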
+func DefinitionID(sd *models.StepDefinition) string { + ptr := sd.HandlerValue.Pointer() + f := runtime.FuncForPC(ptr) + dir := filepath.Dir(sd.File) + fn := strings.Replace(f.Name(), dir, "", -1) + var parts []string + for _, gr := range matchFuncDefRef.FindAllStringSubmatch(fn, -1) { + parts = append(parts, strings.Trim(gr[1], "_.")) + } + if len(parts) > 0 { + // case when suite is a structure with methods + fn = strings.Join(parts, ".") + } else { + // case when steps are just plain funcs + fn = strings.Trim(fn, "_.") + } + + if pkg := os.Getenv("GODOG_TESTED_PACKAGE"); len(pkg) > 0 { + fn = strings.Replace(fn, pkg, "", 1) + fn = strings.TrimLeft(fn, ".") + fn = strings.Replace(fn, "..", ".", -1) + } + + return fmt.Sprintf("%s:%d -> %s", filepath.Base(sd.File), sd.Line, fn) +} + +var matchFuncDefRef = regexp.MustCompile(`\(([^\)]+)\)`) diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_base.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_base.go new file mode 100644 index 000000000..607a1c065 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_base.go @@ -0,0 +1,272 @@ +package formatters + +import ( + "bytes" + "fmt" + "io" + "os" + "sort" + "strconv" + "strings" + "sync" + "unicode" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/storage" + "github.com/cucumber/godog/internal/utils" +) + +// BaseFormatterFunc implements the FormatterFunc for the base formatter. +func BaseFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return NewBase(suite, out) +} + +// NewBase creates a new base formatter. +func NewBase(suite string, out io.Writer) *Base { + return &Base{ + suiteName: suite, + indent: 2, + out: out, + Lock: new(sync.Mutex), + } +} + +// Base is a base formatter. +type Base struct { + suiteName string + out io.Writer + indent int + + Storage *storage.Storage + Lock *sync.Mutex +} + +// SetStorage assigns gherkin data storage. +func (f *Base) SetStorage(st *storage.Storage) { + f.Lock.Lock() + defer f.Lock.Unlock() + + f.Storage = st +} + +// TestRunStarted is triggered on test start. +func (f *Base) TestRunStarted() {} + +// Feature receives gherkin document. +func (f *Base) Feature(*messages.GherkinDocument, string, []byte) {} + +// Pickle receives scenario. +func (f *Base) Pickle(*messages.Pickle) {} + +// Defined receives step definition. +func (f *Base) Defined(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) { +} + +// Passed captures passed step. +func (f *Base) Passed(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) {} + +// Skipped captures skipped step. +func (f *Base) Skipped(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) { +} + +// Undefined captures undefined step. +func (f *Base) Undefined(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) { +} + +// Failed captures failed step. +func (f *Base) Failed(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition, error) { +} + +// Pending captures pending step. +func (f *Base) Pending(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition) { +} + +// Ambiguous captures ambiguous step. +func (f *Base) Ambiguous(*messages.Pickle, *messages.PickleStep, *formatters.StepDefinition, error) { +} + +// Summary renders summary information. 
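+// The printed block has roughly this shape (counts are illustrative):
+//
+//	2 scenarios (1 passed, 1 failed)
+//	8 steps (6 passed, 1 failed, 1 skipped)
+//	1.523s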
+func (f *Base) Summary() { + var totalSc, passedSc, undefinedSc int + var totalSt, passedSt, failedSt, skippedSt, pendingSt, undefinedSt, ambiguousSt int + + pickleResults := f.Storage.MustGetPickleResults() + for _, pr := range pickleResults { + var prStatus models.StepResultStatus + totalSc++ + + pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleID(pr.PickleID) + + if len(pickleStepResults) == 0 { + prStatus = undefined + } + + for _, sr := range pickleStepResults { + totalSt++ + + switch sr.Status { + case passed: + passedSt++ + case failed: + prStatus = failed + failedSt++ + case ambiguous: + prStatus = ambiguous + ambiguousSt++ + case skipped: + skippedSt++ + case undefined: + prStatus = undefined + undefinedSt++ + case pending: + prStatus = pending + pendingSt++ + } + } + + if prStatus == passed { + passedSc++ + } else if prStatus == undefined { + undefinedSc++ + } + } + + var steps, parts, scenarios []string + if passedSt > 0 { + steps = append(steps, green(fmt.Sprintf("%d passed", passedSt))) + } + if failedSt > 0 { + parts = append(parts, red(fmt.Sprintf("%d failed", failedSt))) + steps = append(steps, red(fmt.Sprintf("%d failed", failedSt))) + } + if pendingSt > 0 { + parts = append(parts, yellow(fmt.Sprintf("%d pending", pendingSt))) + steps = append(steps, yellow(fmt.Sprintf("%d pending", pendingSt))) + } + if ambiguousSt > 0 { + parts = append(parts, yellow(fmt.Sprintf("%d ambiguous", ambiguousSt))) + steps = append(steps, yellow(fmt.Sprintf("%d ambiguous", ambiguousSt))) + } + if undefinedSt > 0 { + parts = append(parts, yellow(fmt.Sprintf("%d undefined", undefinedSc))) + steps = append(steps, yellow(fmt.Sprintf("%d undefined", undefinedSt))) + } else if undefinedSc > 0 { + // there may be some scenarios without steps + parts = append(parts, yellow(fmt.Sprintf("%d undefined", undefinedSc))) + } + if skippedSt > 0 { + steps = append(steps, cyan(fmt.Sprintf("%d skipped", skippedSt))) + } + if passedSc > 0 { + scenarios = append(scenarios, green(fmt.Sprintf("%d passed", passedSc))) + } + scenarios = append(scenarios, parts...) + + testRunStartedAt := f.Storage.MustGetTestRunStarted().StartedAt + elapsed := utils.TimeNowFunc().Sub(testRunStartedAt) + + fmt.Fprintln(f.out, "") + + if totalSc == 0 { + fmt.Fprintln(f.out, "No scenarios") + } else { + fmt.Fprintf(f.out, "%d scenarios (%s)\n", totalSc, strings.Join(scenarios, ", ")) + } + + if totalSt == 0 { + fmt.Fprintln(f.out, "No steps") + } else { + fmt.Fprintf(f.out, "%d steps (%s)\n", totalSt, strings.Join(steps, ", ")) + } + + elapsedString := elapsed.String() + if elapsed.Nanoseconds() == 0 { + // go 1.5 and 1.6 prints 0 instead of 0s, if duration is zero. + elapsedString = "0s" + } + fmt.Fprintln(f.out, elapsedString) + + // prints used randomization seed + seed, err := strconv.ParseInt(os.Getenv("GODOG_SEED"), 10, 64) + if err == nil && seed != 0 { + fmt.Fprintln(f.out, "") + fmt.Fprintln(f.out, "Randomized with seed:", colors.Yellow(seed)) + } + + if text := f.Snippets(); text != "" { + fmt.Fprintln(f.out, "") + fmt.Fprintln(f.out, yellow("You can implement step definitions for undefined steps with these snippets:")) + fmt.Fprintln(f.out, yellow(text)) + } +} + +// Snippets returns code suggestions for undefined steps. 
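+// For an undefined step like `I eat 5 apples` the generated suggestion looks
+// roughly like (illustrative):
+//
+//	func iEatApples(arg1 int) error {
+//		return godog.ErrPending
+//	}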
+func (f *Base) Snippets() string {
+	undefinedStepResults := f.Storage.MustGetPickleStepResultsByStatus(undefined)
+	if len(undefinedStepResults) == 0 {
+		return ""
+	}
+
+	var index int
+	var snips []undefinedSnippet
+	// build snippets
+	for _, u := range undefinedStepResults {
+		pickleStep := f.Storage.MustGetPickleStep(u.PickleStepID)
+
+		steps := []string{pickleStep.Text}
+		arg := pickleStep.Argument
+		if u.Def != nil {
+			steps = u.Def.Undefined
+			arg = nil
+		}
+		for _, step := range steps {
+			expr := snippetExprCleanup.ReplaceAllString(step, "\\$1")
+			expr = snippetNumbers.ReplaceAllString(expr, "(\\d+)")
+			expr = snippetExprQuoted.ReplaceAllString(expr, "$1\"([^\"]*)\"$2")
+			expr = "^" + strings.TrimSpace(expr) + "$"
+
+			name := snippetNumbers.ReplaceAllString(step, " ")
+			name = snippetExprQuoted.ReplaceAllString(name, " ")
+			name = strings.TrimSpace(snippetMethodName.ReplaceAllString(name, ""))
+			var words []string
+			for i, w := range strings.Split(name, " ") {
+				switch {
+				case i != 0:
+					w = strings.Title(w)
+				case len(w) > 0:
+					w = string(unicode.ToLower(rune(w[0]))) + w[1:]
+				}
+				words = append(words, w)
+			}
+			name = strings.Join(words, "")
+			if len(name) == 0 {
+				index++
+				name = fmt.Sprintf("StepDefinition%d", index)
+			}
+
+			var found bool
+			for _, snip := range snips {
+				if snip.Expr == expr {
+					found = true
+					break
+				}
+			}
+			if !found {
+				snips = append(snips, undefinedSnippet{Method: name, Expr: expr, argument: arg})
+			}
+		}
+	}
+
+	sort.Sort(snippetSortByMethod(snips))
+
+	var buf bytes.Buffer
+	if err := undefinedSnippetsTpl.Execute(&buf, snips); err != nil {
+		panic(err)
+	}
+	// there may be trailing spaces
+	return strings.Replace(buf.String(), " \n", "\n", -1)
+}
diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_cucumber.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_cucumber.go
new file mode 100644
index 000000000..31380c975
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_cucumber.go
@@ -0,0 +1,326 @@
+package formatters
+
+/*
+	The specification for the formatting originated from https://www.relishapp.com/cucumber/cucumber/docs/formatters/json-output-formatter.
+	I found that documentation to be misleading or outdated. To validate the formatting I created a ruby cucumber test harness and ran the
+	same feature files through godog and ruby cucumber.
+
+	The docstrings in cucumber.feature represent the cucumber output for those same feature definitions.
+
+	I did note that comments in ruby could be at just about any level, in particular Feature, Scenario and Step. In godog I
+	could only find comments under the Feature data structure.
+*/
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+
+	"github.com/cucumber/godog/formatters"
+	"github.com/cucumber/godog/internal/models"
+	messages "github.com/cucumber/messages/go/v21"
+)
+
+func init() {
+	formatters.Format("cucumber", "Produces cucumber JSON format output.", CucumberFormatterFunc)
+}
+
+// CucumberFormatterFunc implements the FormatterFunc for the cucumber formatter.
+func CucumberFormatterFunc(suite string, out io.Writer) formatters.Formatter {
+	return &Cuke{Base: NewBase(suite, out)}
+}
+
+// Cuke renders test results in the Cucumber JSON format.
+type Cuke struct {
+	*Base
+}
+
+// Summary renders test result as Cucumber JSON.
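+// The output is a JSON array of features, roughly (illustrative, abridged):
+//
+//	[{"uri":"features/ls.feature","id":"ls","keyword":"Feature","elements":[
+//	  {"id":"ls;list-directory","type":"scenario","steps":[
+//	    {"keyword":"When ","name":"I run ls","result":{"status":"passed"}}]}]}]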
+func (f *Cuke) Summary() { + features := f.Storage.MustGetFeatures() + + res := f.buildCukeFeatures(features) + + dat, err := json.MarshalIndent(res, "", " ") + if err != nil { + panic(err) + } + + fmt.Fprintf(f.out, "%s\n", string(dat)) +} + +func (f *Cuke) buildCukeFeatures(features []*models.Feature) (res []CukeFeatureJSON) { + sort.Sort(sortFeaturesByName(features)) + + res = make([]CukeFeatureJSON, len(features)) + + for idx, feat := range features { + cukeFeature := buildCukeFeature(feat) + + pickles := f.Storage.MustGetPickles(feat.Uri) + sort.Sort(sortPicklesByID(pickles)) + + cukeFeature.Elements = f.buildCukeElements(pickles) + + for jdx, elem := range cukeFeature.Elements { + elem.ID = cukeFeature.ID + ";" + makeCukeID(elem.Name) + elem.ID + elem.Tags = append(cukeFeature.Tags, elem.Tags...) + cukeFeature.Elements[jdx] = elem + } + + res[idx] = cukeFeature + } + + return res +} + +func (f *Cuke) buildCukeElements(pickles []*messages.Pickle) (res []cukeElement) { + res = make([]cukeElement, len(pickles)) + + for idx, pickle := range pickles { + pickleResult := f.Storage.MustGetPickleResult(pickle.Id) + pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleID(pickle.Id) + + cukeElement := f.buildCukeElement(pickle) + + stepStartedAt := pickleResult.StartedAt + + cukeElement.Steps = make([]cukeStep, len(pickleStepResults)) + sort.Sort(sortPickleStepResultsByPickleStepID(pickleStepResults)) + + for jdx, stepResult := range pickleStepResults { + cukeStep := f.buildCukeStep(pickle, stepResult) + + stepResultFinishedAt := stepResult.FinishedAt + d := int(stepResultFinishedAt.Sub(stepStartedAt).Nanoseconds()) + stepStartedAt = stepResultFinishedAt + + cukeStep.Result.Duration = &d + if stepResult.Status == undefined || + stepResult.Status == pending || + stepResult.Status == skipped || + stepResult.Status == ambiguous { + cukeStep.Result.Duration = nil + } + + cukeElement.Steps[jdx] = cukeStep + } + + res[idx] = cukeElement + } + + return res +} + +type cukeComment struct { + Value string `json:"value"` + Line int `json:"line"` +} + +type cukeDocstring struct { + Value string `json:"value"` + ContentType string `json:"content_type"` + Line int `json:"line"` +} + +type cukeTag struct { + Name string `json:"name"` + Line int `json:"line"` +} + +type cukeResult struct { + Status string `json:"status"` + Error string `json:"error_message,omitempty"` + Duration *int `json:"duration,omitempty"` +} + +type cukeMatch struct { + Location string `json:"location"` +} + +type cukeEmbedding struct { + Name string `json:"name"` + MimeType string `json:"mime_type"` + Data string `json:"data"` +} + +type cukeStep struct { + Keyword string `json:"keyword"` + Name string `json:"name"` + Line int `json:"line"` + Docstring *cukeDocstring `json:"doc_string,omitempty"` + Match cukeMatch `json:"match"` + Result cukeResult `json:"result"` + DataTable []*cukeDataTableRow `json:"rows,omitempty"` + Embeddings []cukeEmbedding `json:"embeddings,omitempty"` +} + +type cukeDataTableRow struct { + Cells []string `json:"cells"` +} + +type cukeElement struct { + ID string `json:"id"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Line int `json:"line"` + Type string `json:"type"` + Tags []cukeTag `json:"tags,omitempty"` + Steps []cukeStep `json:"steps,omitempty"` +} + +// CukeFeatureJSON ... 
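+// It mirrors a single feature entry in the generated cucumber JSON report.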
+type CukeFeatureJSON struct { + URI string `json:"uri"` + ID string `json:"id"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Line int `json:"line"` + Comments []cukeComment `json:"comments,omitempty"` + Tags []cukeTag `json:"tags,omitempty"` + Elements []cukeElement `json:"elements,omitempty"` +} + +func buildCukeFeature(feat *models.Feature) CukeFeatureJSON { + cukeFeature := CukeFeatureJSON{ + URI: feat.Uri, + ID: makeCukeID(feat.Feature.Name), + Keyword: feat.Feature.Keyword, + Name: feat.Feature.Name, + Description: feat.Feature.Description, + Line: int(feat.Feature.Location.Line), + Comments: make([]cukeComment, len(feat.Comments)), + Tags: make([]cukeTag, len(feat.Feature.Tags)), + } + + for idx, element := range feat.Feature.Tags { + cukeFeature.Tags[idx].Line = int(element.Location.Line) + cukeFeature.Tags[idx].Name = element.Name + } + + for idx, comment := range feat.Comments { + cukeFeature.Comments[idx].Value = strings.TrimSpace(comment.Text) + cukeFeature.Comments[idx].Line = int(comment.Location.Line) + } + + return cukeFeature +} + +func (f *Cuke) buildCukeElement(pickle *messages.Pickle) (cukeElement cukeElement) { + feature := f.Storage.MustGetFeature(pickle.Uri) + scenario := feature.FindScenario(pickle.AstNodeIds[0]) + + cukeElement.Name = pickle.Name + cukeElement.Line = int(scenario.Location.Line) + cukeElement.Description = scenario.Description + cukeElement.Keyword = scenario.Keyword + cukeElement.Type = "scenario" + + cukeElement.Tags = make([]cukeTag, len(scenario.Tags)) + for idx, element := range scenario.Tags { + cukeElement.Tags[idx].Line = int(element.Location.Line) + cukeElement.Tags[idx].Name = element.Name + } + + if len(pickle.AstNodeIds) == 1 { + return + } + + example, _ := feature.FindExample(pickle.AstNodeIds[1]) + + for _, tag := range example.Tags { + tag := cukeTag{Line: int(tag.Location.Line), Name: tag.Name} + cukeElement.Tags = append(cukeElement.Tags, tag) + } + + examples := scenario.Examples + if len(examples) > 0 { + rowID := pickle.AstNodeIds[1] + + for _, example := range examples { + for idx, row := range example.TableBody { + if rowID == row.Id { + cukeElement.ID += fmt.Sprintf(";%s;%d", makeCukeID(example.Name), idx+2) + cukeElement.Line = int(row.Location.Line) + } + } + } + } + + return cukeElement +} + +func (f *Cuke) buildCukeStep(pickle *messages.Pickle, stepResult models.PickleStepResult) (cukeStep cukeStep) { + feature := f.Storage.MustGetFeature(pickle.Uri) + pickleStep := f.Storage.MustGetPickleStep(stepResult.PickleStepID) + step := feature.FindStep(pickleStep.AstNodeIds[0]) + + line := step.Location.Line + + cukeStep.Name = pickleStep.Text + cukeStep.Line = int(line) + cukeStep.Keyword = step.Keyword + + arg := pickleStep.Argument + + if arg != nil { + if arg.DocString != nil && step.DocString != nil { + cukeStep.Docstring = &cukeDocstring{} + cukeStep.Docstring.ContentType = strings.TrimSpace(arg.DocString.MediaType) + if step.Location != nil { + cukeStep.Docstring.Line = int(step.DocString.Location.Line) + } + cukeStep.Docstring.Value = arg.DocString.Content + } + + if arg.DataTable != nil { + cukeStep.DataTable = make([]*cukeDataTableRow, len(arg.DataTable.Rows)) + for i, row := range arg.DataTable.Rows { + cells := make([]string, len(row.Cells)) + for j, cell := range row.Cells { + cells[j] = cell.Value + } + cukeStep.DataTable[i] = &cukeDataTableRow{Cells: cells} + } + } + } + + if stepResult.Def != nil { + cukeStep.Match.Location = 
strings.Split(DefinitionID(stepResult.Def), " ")[0] + } + + cukeStep.Result.Status = stepResult.Status.String() + if stepResult.Err != nil { + cukeStep.Result.Error = stepResult.Err.Error() + } + + if stepResult.Status == undefined || stepResult.Status == pending || stepResult.Status == ambiguous { + cukeStep.Match.Location = fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line) + } + + if stepResult.Attachments != nil { + attachments := []cukeEmbedding{} + + for _, a := range stepResult.Attachments { + attachments = append(attachments, cukeEmbedding{ + Name: a.Name, + Data: base64.StdEncoding.EncodeToString(a.Data), + MimeType: a.MimeType, + }) + } + + if len(attachments) > 0 { + cukeStep.Embeddings = attachments + } + } + return cukeStep +} + +func makeCukeID(name string) string { + return strings.Replace(strings.ToLower(name), " ", "-", -1) +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_events.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_events.go new file mode 100644 index 000000000..c5ffcb50e --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_events.go @@ -0,0 +1,346 @@ +package formatters + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/utils" + messages "github.com/cucumber/messages/go/v21" +) + +const nanoSec = 1000000 +const spec = "0.1.0" + +func init() { + formatters.Format("events", fmt.Sprintf("Produces JSON event stream, based on spec: %s.", spec), EventsFormatterFunc) +} + +// EventsFormatterFunc implements the FormatterFunc for the events formatter +func EventsFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return &Events{Base: NewBase(suite, out)} +} + +// Events - Events formatter +type Events struct { + *Base +} + +func (f *Events) event(ev interface{}) { + data, err := json.Marshal(ev) + if err != nil { + panic(fmt.Sprintf("failed to marshal stream event: %+v - %v", ev, err)) + } + fmt.Fprintln(f.out, string(data)) +} + +// Pickle receives scenario. +func (f *Events) Pickle(pickle *messages.Pickle) { + f.Base.Pickle(pickle) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + }{ + "TestCaseStarted", + f.scenarioLocation(pickle), + utils.TimeNowFunc().UnixNano() / nanoSec, + }) + + if len(pickle.Steps) == 0 { + // @TODO: is status undefined or passed? when there are no steps + // for this scenario + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + Status string `json:"status"` + }{ + "TestCaseFinished", + f.scenarioLocation(pickle), + utils.TimeNowFunc().UnixNano() / nanoSec, + "undefined", + }) + } +} + +// TestRunStarted is triggered on test start. +func (f *Events) TestRunStarted() { + f.Base.TestRunStarted() + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.event(&struct { + Event string `json:"event"` + Version string `json:"version"` + Timestamp int64 `json:"timestamp"` + Suite string `json:"suite"` + }{ + "TestRunStarted", + spec, + utils.TimeNowFunc().UnixNano() / nanoSec, + f.suiteName, + }) +} + +// Feature receives gherkin document. 
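+// It emits a single "TestSource" event carrying the feature file location and
+// its raw contents, e.g. (illustrative):
+//
+//	{"event":"TestSource","location":"features/ls.feature:1","source":"Feature: ls\n..."}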
+func (f *Events) Feature(ft *messages.GherkinDocument, p string, c []byte) { + f.Base.Feature(ft, p, c) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Source string `json:"source"` + }{ + "TestSource", + fmt.Sprintf("%s:%d", p, ft.Feature.Location.Line), + string(c), + }) +} + +// Summary pushes summary information to JSON stream. +func (f *Events) Summary() { + // @TODO: determine status + status := passed + + f.Storage.MustGetPickleStepResultsByStatus(failed) + + if len(f.Storage.MustGetPickleStepResultsByStatus(failed)) > 0 { + status = failed + } else if len(f.Storage.MustGetPickleStepResultsByStatus(passed)) == 0 { + if len(f.Storage.MustGetPickleStepResultsByStatus(undefined)) > len(f.Storage.MustGetPickleStepResultsByStatus(pending)) { + status = undefined + } else { + status = pending + } + } + + snips := f.Snippets() + if len(snips) > 0 { + snips = "You can implement step definitions for undefined steps with these snippets:\n" + snips + } + + f.event(&struct { + Event string `json:"event"` + Status string `json:"status"` + Timestamp int64 `json:"timestamp"` + Snippets string `json:"snippets"` + Memory string `json:"memory"` + }{ + "TestRunFinished", + status.String(), + utils.TimeNowFunc().UnixNano() / nanoSec, + snips, + "", // @TODO not sure that could be correctly implemented + }) +} + +func (f *Events) step(pickle *messages.Pickle, pickleStep *messages.PickleStep) { + feature := f.Storage.MustGetFeature(pickle.Uri) + pickleStepResult := f.Storage.MustGetPickleStepResult(pickleStep.Id) + step := feature.FindStep(pickleStep.AstNodeIds[0]) + + var errMsg string + if pickleStepResult.Err != nil { + errMsg = pickleStepResult.Err.Error() + } + + if pickleStepResult.Attachments != nil { + for _, attachment := range pickleStepResult.Attachments { + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + ContentEncoding string `json:"contentEncoding"` + FileName string `json:"fileName"` + MimeType string `json:"mimeType"` + Body string `json:"body"` + }{ + "Attachment", + fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line), + utils.TimeNowFunc().UnixNano() / nanoSec, + messages.AttachmentContentEncoding_BASE64.String(), + attachment.Name, + attachment.MimeType, + string(attachment.Data), + }) + + } + } + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + Status string `json:"status"` + Summary string `json:"summary,omitempty"` + }{ + "TestStepFinished", + fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line), + utils.TimeNowFunc().UnixNano() / nanoSec, + pickleStepResult.Status.String(), + errMsg, + }) + + if isLastStep(pickle, pickleStep) { + var status string + + pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleID(pickle.Id) + for _, stepResult := range pickleStepResults { + switch stepResult.Status { + case passed, failed, undefined, pending, ambiguous: + status = stepResult.Status.String() + } + } + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + Status string `json:"status"` + }{ + "TestCaseFinished", + f.scenarioLocation(pickle), + utils.TimeNowFunc().UnixNano() / nanoSec, + status, + }) + } +} + +// Defined receives step definition. 
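+// If a definition matched, it first emits a "StepDefinitionFound" event with
+// the matched argument positions; it then emits a "TestStepStarted" event
+// either way.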
+func (f *Events) Defined(pickle *messages.Pickle, pickleStep *messages.PickleStep, def *formatters.StepDefinition) { + f.Base.Defined(pickle, pickleStep, def) + + f.Lock.Lock() + defer f.Lock.Unlock() + + feature := f.Storage.MustGetFeature(pickle.Uri) + step := feature.FindStep(pickleStep.AstNodeIds[0]) + + if def != nil { + matchedDef := f.Storage.MustGetStepDefintionMatch(pickleStep.AstNodeIds[0]) + + m := def.Expr.FindStringSubmatchIndex(pickleStep.Text)[2:] + var args [][2]int + for i := 0; i < len(m)/2; i++ { + pair := m[i : i*2+2] + var idxs [2]int + idxs[0] = pair[0] + idxs[1] = pair[1] + args = append(args, idxs) + } + + if len(args) == 0 { + args = make([][2]int, 0) + } + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + DefID string `json:"definition_id"` + Args [][2]int `json:"arguments"` + }{ + "StepDefinitionFound", + fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line), + DefinitionID(matchedDef), + args, + }) + } + + f.event(&struct { + Event string `json:"event"` + Location string `json:"location"` + Timestamp int64 `json:"timestamp"` + }{ + "TestStepStarted", + fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line), + utils.TimeNowFunc().UnixNano() / nanoSec, + }) +} + +// Passed captures passed step. +func (f *Events) Passed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Passed(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +// Skipped captures skipped step. +func (f *Events) Skipped(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Skipped(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +// Undefined captures undefined step. +func (f *Events) Undefined(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Undefined(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +// Failed captures failed step. +func (f *Events) Failed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) { + f.Base.Failed(pickle, step, match, err) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +// Pending captures pending step. +func (f *Events) Pending(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Pending(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +// Ambiguous captures ambiguous step. 
+func (f *Events) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) { + f.Base.Ambiguous(pickle, step, match, err) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(pickle, step) +} + +func (f *Events) scenarioLocation(pickle *messages.Pickle) string { + feature := f.Storage.MustGetFeature(pickle.Uri) + scenario := feature.FindScenario(pickle.AstNodeIds[0]) + + line := scenario.Location.Line + if len(pickle.AstNodeIds) == 2 { + _, row := feature.FindExample(pickle.AstNodeIds[1]) + line = row.Location.Line + } + + return fmt.Sprintf("%s:%d", pickle.Uri, line) +} + +func isLastStep(pickle *messages.Pickle, step *messages.PickleStep) bool { + return pickle.Steps[len(pickle.Steps)-1].Id == step.Id +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_flushwrap.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_flushwrap.go new file mode 100644 index 000000000..129b06210 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_flushwrap.go @@ -0,0 +1,108 @@ +package formatters + +import ( + "sync" + + "github.com/cucumber/godog/formatters" + messages "github.com/cucumber/messages/go/v21" +) + +// WrapOnFlush wrap a `formatters.Formatter` in a `formatters.FlushFormatter`, which only +// executes when `Flush` is called +func WrapOnFlush(fmt formatters.Formatter) formatters.FlushFormatter { + return &onFlushFormatter{ + fmt: fmt, + fns: make([]func(), 0), + mu: &sync.Mutex{}, + } +} + +type onFlushFormatter struct { + fmt formatters.Formatter + fns []func() + mu *sync.Mutex +} + +func (o *onFlushFormatter) Pickle(pickle *messages.Pickle) { + o.fns = append(o.fns, func() { + o.fmt.Pickle(pickle) + }) +} + +func (o *onFlushFormatter) Passed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + o.fns = append(o.fns, func() { + o.fmt.Passed(pickle, step, definition) + }) +} + +// Ambiguous implements formatters.Formatter. +func (o *onFlushFormatter) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) { + o.fns = append(o.fns, func() { + o.fmt.Ambiguous(pickle, step, definition, err) + }) +} + +// Defined implements formatters.Formatter. +func (o *onFlushFormatter) Defined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + o.fns = append(o.fns, func() { + o.fmt.Defined(pickle, step, definition) + }) +} + +// Failed implements formatters.Formatter. +func (o *onFlushFormatter) Failed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) { + o.fns = append(o.fns, func() { + o.fmt.Failed(pickle, step, definition, err) + }) +} + +// Feature implements formatters.Formatter. +func (o *onFlushFormatter) Feature(pickle *messages.GherkinDocument, p string, c []byte) { + o.fns = append(o.fns, func() { + o.fmt.Feature(pickle, p, c) + }) +} + +// Pending implements formatters.Formatter. +func (o *onFlushFormatter) Pending(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + o.fns = append(o.fns, func() { + o.fmt.Pending(pickle, step, definition) + }) +} + +// Skipped implements formatters.Formatter. +func (o *onFlushFormatter) Skipped(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + o.fns = append(o.fns, func() { + o.fmt.Skipped(pickle, step, definition) + }) +} + +// Summary implements formatters.Formatter. 
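+// Like the other hooks, it is only queued here and runs when Flush is called.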
+func (o *onFlushFormatter) Summary() { + o.fns = append(o.fns, func() { + o.fmt.Summary() + }) +} + +// TestRunStarted implements formatters.Formatter. +func (o *onFlushFormatter) TestRunStarted() { + o.fns = append(o.fns, func() { + o.fmt.TestRunStarted() + }) +} + +// Undefined implements formatters.Formatter. +func (o *onFlushFormatter) Undefined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + o.fns = append(o.fns, func() { + o.fmt.Undefined(pickle, step, definition) + }) +} + +// Flush the logs. +func (o *onFlushFormatter) Flush() { + o.mu.Lock() + defer o.mu.Unlock() + for _, fn := range o.fns { + fn() + } +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_junit.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_junit.go new file mode 100644 index 000000000..85acabe2e --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_junit.go @@ -0,0 +1,246 @@ +package formatters + +import ( + "encoding/xml" + "fmt" + "io" + "os" + "sort" + "strconv" + "time" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/utils" +) + +func init() { + formatters.Format("junit", "Prints junit compatible xml to stdout", JUnitFormatterFunc) +} + +// JUnitFormatterFunc implements the FormatterFunc for the junit formatter +func JUnitFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return &JUnit{Base: NewBase(suite, out)} +} + +// JUnit renders test results in JUnit format. +type JUnit struct { + *Base +} + +// Summary renders summary information. +func (f *JUnit) Summary() { + suite := f.buildJUNITPackageSuite() + + _, err := io.WriteString(f.out, xml.Header) + if err != nil { + fmt.Fprintln(os.Stderr, "failed to write junit string:", err) + } + + enc := xml.NewEncoder(f.out) + enc.Indent("", s(2)) + if err = enc.Encode(suite); err != nil { + fmt.Fprintln(os.Stderr, "failed to write junit xml:", err) + } +} + +func junitTimeDuration(from, to time.Time) string { + return strconv.FormatFloat(to.Sub(from).Seconds(), 'f', -1, 64) +} + +// getPickleResult deals with the fact that if there's no result due to 'StopOnFirstFailure' being +// set, MustGetPickleResult panics. 
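+// It recovers from that panic and reports a nil result instead, which the
+// caller maps to a skipped test case. (The corresponding option is named
+// StopOnFailure in flags.Options.)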
+func (f *JUnit) getPickleResult(pickleID string) (res *models.PickleResult) { + defer func() { + if r := recover(); r != nil { + res = nil + } + }() + pr := f.Storage.MustGetPickleResult(pickleID) + res = &pr + return +} + +func (f *JUnit) getPickleStepResult(stepID string) (res *models.PickleStepResult) { + defer func() { + if r := recover(); r != nil { + res = nil + } + }() + psr := f.Storage.MustGetPickleStepResult(stepID) + res = &psr + return +} + +func (f *JUnit) getPickleStepResultsByPickleID(pickleID string) (res []models.PickleStepResult) { + defer func() { + if r := recover(); r != nil { + res = nil + } + }() + res = f.Storage.MustGetPickleStepResultsByPickleID(pickleID) + return +} + +func (f *JUnit) buildJUNITPackageSuite() JunitPackageSuite { + features := f.Storage.MustGetFeatures() + sort.Sort(sortFeaturesByName(features)) + + testRunStartedAt := f.Storage.MustGetTestRunStarted().StartedAt + + suite := JunitPackageSuite{ + Name: f.suiteName, + TestSuites: make([]*junitTestSuite, len(features)), + Time: junitTimeDuration(testRunStartedAt, utils.TimeNowFunc()), + } + + for idx, feature := range features { + pickles := f.Storage.MustGetPickles(feature.Uri) + sort.Sort(sortPicklesByID(pickles)) + + ts := junitTestSuite{ + Name: feature.Feature.Name, + TestCases: make([]*junitTestCase, len(pickles)), + } + + var testcaseNames = make(map[string]int) + for _, pickle := range pickles { + testcaseNames[pickle.Name] = testcaseNames[pickle.Name] + 1 + } + + firstPickleStartedAt := testRunStartedAt + lastPickleFinishedAt := testRunStartedAt + + var outlineNo = make(map[string]int) + for idx, pickle := range pickles { + tc := junitTestCase{} + tc.Name = pickle.Name + if testcaseNames[tc.Name] > 1 { + outlineNo[tc.Name] = outlineNo[tc.Name] + 1 + tc.Name += fmt.Sprintf(" #%d", outlineNo[tc.Name]) + } + + pickleResult := f.getPickleResult(pickle.Id) + if pickleResult == nil { + tc.Status = skipped.String() + } else { + if idx == 0 { + firstPickleStartedAt = pickleResult.StartedAt + } + lastPickleFinishedAt = pickleResult.StartedAt + } + + if len(pickle.Steps) > 0 { + lastStep := pickle.Steps[len(pickle.Steps)-1] + if lastPickleStepResult := f.getPickleStepResult(lastStep.Id); lastPickleStepResult != nil { + lastPickleFinishedAt = lastPickleStepResult.FinishedAt + } + } + + if pickleResult != nil { + tc.Time = junitTimeDuration(pickleResult.StartedAt, lastPickleFinishedAt) + } + + ts.Tests++ + suite.Tests++ + + pickleStepResults := f.getPickleStepResultsByPickleID(pickle.Id) + for _, stepResult := range pickleStepResults { + pickleStep := f.Storage.MustGetPickleStep(stepResult.PickleStepID) + + switch stepResult.Status { + case passed: + tc.Status = passed.String() + case failed: + tc.Status = failed.String() + tc.Failure = &junitFailure{ + Message: fmt.Sprintf("Step %s: %s", pickleStep.Text, stepResult.Err), + } + case ambiguous: + tc.Status = ambiguous.String() + tc.Error = append(tc.Error, &junitError{ + Type: "ambiguous", + Message: fmt.Sprintf("Step %s", pickleStep.Text), + }) + case skipped: + tc.Error = append(tc.Error, &junitError{ + Type: "skipped", + Message: fmt.Sprintf("Step %s", pickleStep.Text), + }) + case undefined: + tc.Status = undefined.String() + tc.Error = append(tc.Error, &junitError{ + Type: "undefined", + Message: fmt.Sprintf("Step %s", pickleStep.Text), + }) + case pending: + tc.Status = pending.String() + tc.Error = append(tc.Error, &junitError{ + Type: "pending", + Message: fmt.Sprintf("Step %s: TODO: write pending definition", pickleStep.Text), + }) + } + } + + 
switch tc.Status { + case failed.String(): + ts.Failures++ + suite.Failures++ + case undefined.String(), pending.String(): + ts.Errors++ + suite.Errors++ + } + + ts.TestCases[idx] = &tc + } + + ts.Time = junitTimeDuration(firstPickleStartedAt, lastPickleFinishedAt) + + suite.TestSuites[idx] = &ts + } + + return suite +} + +type junitFailure struct { + Message string `xml:"message,attr"` + Type string `xml:"type,attr,omitempty"` +} + +type junitError struct { + XMLName xml.Name `xml:"error,omitempty"` + Message string `xml:"message,attr"` + Type string `xml:"type,attr"` +} + +type junitTestCase struct { + XMLName xml.Name `xml:"testcase"` + Name string `xml:"name,attr"` + Status string `xml:"status,attr"` + Time string `xml:"time,attr"` + Failure *junitFailure `xml:"failure,omitempty"` + Error []*junitError +} + +type junitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + Name string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Skipped int `xml:"skipped,attr"` + Failures int `xml:"failures,attr"` + Errors int `xml:"errors,attr"` + Time string `xml:"time,attr"` + TestCases []*junitTestCase +} + +// JunitPackageSuite ... +type JunitPackageSuite struct { + XMLName xml.Name `xml:"testsuites"` + Name string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Skipped int `xml:"skipped,attr"` + Failures int `xml:"failures,attr"` + Errors int `xml:"errors,attr"` + Time string `xml:"time,attr"` + TestSuites []*junitTestSuite +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_multi.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_multi.go new file mode 100644 index 000000000..001c99809 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_multi.go @@ -0,0 +1,139 @@ +package formatters + +import ( + "io" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/storage" + messages "github.com/cucumber/messages/go/v21" +) + +// MultiFormatter passes test progress to multiple formatters. +type MultiFormatter struct { + formatters []formatter + repeater repeater +} + +type formatter struct { + fmt formatters.FormatterFunc + out io.Writer +} + +type repeater []formatters.Formatter + +type storageFormatter interface { + SetStorage(s *storage.Storage) +} + +// SetStorage passes storage to all added formatters. +func (r repeater) SetStorage(s *storage.Storage) { + for _, f := range r { + if ss, ok := f.(storageFormatter); ok { + ss.SetStorage(s) + } + } +} + +// TestRunStarted triggers TestRunStarted for all added formatters. +func (r repeater) TestRunStarted() { + for _, f := range r { + f.TestRunStarted() + } +} + +// Feature triggers Feature for all added formatters. +func (r repeater) Feature(document *messages.GherkinDocument, s string, bytes []byte) { + for _, f := range r { + f.Feature(document, s, bytes) + } +} + +// Pickle triggers Pickle for all added formatters. +func (r repeater) Pickle(pickle *messages.Pickle) { + for _, f := range r { + f.Pickle(pickle) + } +} + +// Defined triggers Defined for all added formatters. +func (r repeater) Defined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + for _, f := range r { + f.Defined(pickle, step, definition) + } +} + +// Failed triggers Failed for all added formatters. 
+func (r repeater) Failed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) { + for _, f := range r { + f.Failed(pickle, step, definition, err) + } +} + +// Passed triggers Passed for all added formatters. +func (r repeater) Passed(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + for _, f := range r { + f.Passed(pickle, step, definition) + } +} + +// Skipped triggers Skipped for all added formatters. +func (r repeater) Skipped(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + for _, f := range r { + f.Skipped(pickle, step, definition) + } +} + +// Undefined triggers Undefined for all added formatters. +func (r repeater) Undefined(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + for _, f := range r { + f.Undefined(pickle, step, definition) + } +} + +// Pending triggers Pending for all added formatters. +func (r repeater) Pending(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition) { + for _, f := range r { + f.Pending(pickle, step, definition) + } +} + +// Ambiguous triggers Ambiguous for all added formatters. +func (r repeater) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, definition *formatters.StepDefinition, err error) { + for _, f := range r { + f.Ambiguous(pickle, step, definition, err) + } +} + +// Summary triggers Summary for all added formatters. +func (r repeater) Summary() { + for _, f := range r { + f.Summary() + } +} + +// Add adds formatter with output writer. +func (m *MultiFormatter) Add(name string, out io.Writer) { + f := formatters.FindFmt(name) + if f == nil { + panic("formatter not found: " + name) + } + + m.formatters = append(m.formatters, formatter{ + fmt: f, + out: out, + }) +} + +// FormatterFunc implements the FormatterFunc for the multi formatter. +func (m *MultiFormatter) FormatterFunc(suite string, out io.Writer) formatters.Formatter { + for _, f := range m.formatters { + out := out + if f.out != nil { + out = f.out + } + + m.repeater = append(m.repeater, f.fmt(suite, out)) + } + + return m.repeater +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_pretty.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_pretty.go new file mode 100644 index 000000000..76d733793 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_pretty.go @@ -0,0 +1,586 @@ +package formatters + +import ( + "fmt" + "io" + "regexp" + "sort" + "strings" + "unicode/utf8" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/models" +) + +func init() { + formatters.Format("pretty", "Prints every feature with runtime statuses.", PrettyFormatterFunc) +} + +// PrettyFormatterFunc implements the FormatterFunc for the pretty formatter +func PrettyFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return &Pretty{Base: NewBase(suite, out)} +} + +var outlinePlaceholderRegexp = regexp.MustCompile("<[^>]+>") + +// Pretty is a formatter for readable output. +type Pretty struct { + *Base + firstFeature *bool +} + +// TestRunStarted is triggered on test start. +func (f *Pretty) TestRunStarted() { + f.Base.TestRunStarted() + + f.Lock.Lock() + defer f.Lock.Unlock() + + firstFeature := true + f.firstFeature = &firstFeature +} + +// Feature receives gherkin document. 
+func (f *Pretty) Feature(gd *messages.GherkinDocument, p string, c []byte) {
+	f.Lock.Lock()
+	if !*f.firstFeature {
+		fmt.Fprintln(f.out, "")
+	}
+
+	*f.firstFeature = false
+	f.Lock.Unlock()
+
+	f.Base.Feature(gd, p, c)
+
+	f.Lock.Lock()
+	defer f.Lock.Unlock()
+
+	f.printFeature(gd.Feature)
+}
+
+// Pickle takes a gherkin node for formatting.
+func (f *Pretty) Pickle(pickle *messages.Pickle) {
+	f.Base.Pickle(pickle)
+
+	f.Lock.Lock()
+	defer f.Lock.Unlock()
+
+	if len(pickle.Steps) == 0 {
+		f.printUndefinedPickle(pickle)
+		return
+	}
+}
+
+// Passed captures passed step.
+func (f *Pretty) Passed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+	f.Base.Passed(pickle, step, match)
+
+	f.Lock.Lock()
+	defer f.Lock.Unlock()
+
+	f.printStep(pickle, step)
+}
+
+// Skipped captures skipped step.
+func (f *Pretty) Skipped(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+	f.Base.Skipped(pickle, step, match)
+
+	f.Lock.Lock()
+	defer f.Lock.Unlock()
+
+	f.printStep(pickle, step)
+}
+
+// Undefined captures undefined step.
+func (f *Pretty) Undefined(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) {
+	f.Base.Undefined(pickle, step, match)
+
+	f.Lock.Lock()
+	defer f.Lock.Unlock()
+
+	f.printStep(pickle, step)
+}
+
+// Failed captures failed step.
+func (f *Pretty) Failed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) {
+	f.Base.Failed(pickle, step, match, err)
+
+	f.Lock.Lock()
+	defer f.Lock.Unlock()
+
+	f.printStep(pickle, step)
+}
+
+// Ambiguous captures ambiguous step.
+func (f *Pretty) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) {
+	f.Base.Ambiguous(pickle, step, match, err)
+
+	f.Lock.Lock()
+	defer f.Lock.Unlock()
+
+	f.printStep(pickle, step)
+}
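Every event hook on the pretty formatter follows the same two-phase shape: delegate to the embedded Base (which records the result into storage under its own locking), then reacquire the lock before printing so concurrently executing pickles cannot interleave output. Below is a minimal self-contained sketch of that pattern; the names (`base`, `prettyish`) are hypothetical and not part of godog.

```go
package main

import (
	"fmt"
	"io"
	"os"
	"sync"
)

// base plays the role of godog's Base formatter: it owns the lock and
// the shared bookkeeping that every event hook updates.
type base struct {
	mu  sync.Mutex
	out io.Writer
	n   int // steps seen so far
}

func (b *base) record() {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.n++
}

type prettyish struct{ *base }

// Passed mirrors the shape of Pretty.Passed above: delegate to the base
// for bookkeeping, then reacquire the lock to print, so concurrently
// running scenarios cannot interleave output.
func (p *prettyish) Passed(text string) {
	p.record()

	p.mu.Lock()
	defer p.mu.Unlock()

	fmt.Fprintf(p.out, "ok %d: %s\n", p.n, text)
}

func main() {
	p := &prettyish{&base{out: os.Stdout}}
	p.Passed("a step")
}
```

+
+// Pending captures pending step.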
+func (f *Pretty) Pending(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Pending(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.printStep(pickle, step) +} + +func (f *Pretty) printFeature(feature *messages.Feature) { + fmt.Fprintln(f.out, keywordAndName(feature.Keyword, feature.Name)) + if strings.TrimSpace(feature.Description) != "" { + for _, line := range strings.Split(feature.Description, "\n") { + fmt.Fprintln(f.out, s(f.indent)+strings.TrimSpace(line)) + } + } +} + +func keywordAndName(keyword, name string) string { + title := whiteb(keyword + ":") + if len(name) > 0 { + title += " " + name + } + return title +} + +func (f *Pretty) scenarioLengths(pickle *messages.Pickle) (scenarioHeaderLength int, maxLength int) { + feature := f.Storage.MustGetFeature(pickle.Uri) + astScenario := feature.FindScenario(pickle.AstNodeIds[0]) + astBackground := feature.FindBackground(pickle.AstNodeIds[0]) + + scenarioHeaderLength = f.lengthPickle(astScenario.Keyword, astScenario.Name) + maxLength = f.longestStep(astScenario.Steps, scenarioHeaderLength) + + if astBackground != nil { + maxLength = f.longestStep(astBackground.Steps, maxLength) + } + + return scenarioHeaderLength, maxLength +} + +func (f *Pretty) printScenarioHeader(pickle *messages.Pickle, astScenario *messages.Scenario, spaceFilling int) { + feature := f.Storage.MustGetFeature(pickle.Uri) + text := s(f.indent) + keywordAndName(astScenario.Keyword, astScenario.Name) + text += s(spaceFilling) + line(feature.Uri, astScenario.Location) + fmt.Fprintln(f.out, "\n"+text) +} + +func (f *Pretty) printUndefinedPickle(pickle *messages.Pickle) { + feature := f.Storage.MustGetFeature(pickle.Uri) + astScenario := feature.FindScenario(pickle.AstNodeIds[0]) + astBackground := feature.FindBackground(pickle.AstNodeIds[0]) + + scenarioHeaderLength, maxLength := f.scenarioLengths(pickle) + + if astBackground != nil { + fmt.Fprintln(f.out, "\n"+s(f.indent)+keywordAndName(astBackground.Keyword, astBackground.Name)) + for _, step := range astBackground.Steps { + text := s(f.indent*2) + cyan(strings.TrimSpace(step.Keyword)) + " " + cyan(step.Text) + fmt.Fprintln(f.out, text) + } + } + + // do not print scenario headers and examples multiple times + if len(astScenario.Examples) > 0 { + exampleTable, exampleRow := feature.FindExample(pickle.AstNodeIds[1]) + firstExampleRow := exampleTable.TableBody[0].Id == exampleRow.Id + firstExamplesTable := astScenario.Examples[0].Location.Line == exampleTable.Location.Line + + if !(firstExamplesTable && firstExampleRow) { + return + } + } + + f.printScenarioHeader(pickle, astScenario, maxLength-scenarioHeaderLength) + + for _, examples := range astScenario.Examples { + max := longestExampleRow(examples, cyan, cyan) + + fmt.Fprintln(f.out, "") + fmt.Fprintln(f.out, s(f.indent*2)+keywordAndName(examples.Keyword, examples.Name)) + + f.printTableHeader(examples.TableHeader, max) + + for _, row := range examples.TableBody { + f.printTableRow(row, max, cyan) + } + } +} + +// Summary renders summary information. 
+func (f *Pretty) Summary() {
+	failedStepResults := f.Storage.MustGetPickleStepResultsByStatus(failed)
+	if len(failedStepResults) > 0 {
+		fmt.Fprintln(f.out, "\n--- "+red("Failed steps:")+"\n")
+
+		sort.Sort(sortPickleStepResultsByPickleStepID(failedStepResults))
+
+		for _, fail := range failedStepResults {
+			pickle := f.Storage.MustGetPickle(fail.PickleID)
+			pickleStep := f.Storage.MustGetPickleStep(fail.PickleStepID)
+			feature := f.Storage.MustGetFeature(pickle.Uri)
+
+			astScenario := feature.FindScenario(pickle.AstNodeIds[0])
+			scenarioDesc := fmt.Sprintf("%s: %s", astScenario.Keyword, pickle.Name)
+
+			astStep := feature.FindStep(pickleStep.AstNodeIds[0])
+			stepDesc := strings.TrimSpace(astStep.Keyword) + " " + pickleStep.Text
+
+			fmt.Fprintln(f.out, s(f.indent)+red(scenarioDesc)+line(feature.Uri, astScenario.Location))
+			fmt.Fprintln(f.out, s(f.indent*2)+red(stepDesc)+line(feature.Uri, astStep.Location))
+			fmt.Fprintln(f.out, s(f.indent*3)+red("Error: ")+redb(fmt.Sprintf("%+v", fail.Err))+"\n")
+		}
+	}
+
+	f.Base.Summary()
+}
+
+func (f *Pretty) printOutlineExample(pickle *messages.Pickle, step *messages.PickleStep, backgroundSteps int) {
+	var errorMsg string
+	var clr = green
+
+	feature := f.Storage.MustGetFeature(pickle.Uri)
+	astScenario := feature.FindScenario(pickle.AstNodeIds[0])
+	scenarioHeaderLength, maxLength := f.scenarioLengths(pickle)
+
+	exampleTable, exampleRow := feature.FindExample(pickle.AstNodeIds[1])
+	printExampleHeader := exampleTable.TableBody[0].Id == exampleRow.Id
+	firstExamplesTable := astScenario.Examples[0].Location.Line == exampleTable.Location.Line
+
+	pickleStepResults := f.Storage.MustGetPickleStepResultsByPickleIDUntilStep(pickle.Id, step.Id)
+
+	firstExecutedScenarioStep := len(pickleStepResults) == backgroundSteps+1
+	if firstExamplesTable && printExampleHeader && firstExecutedScenarioStep {
+		f.printScenarioHeader(pickle, astScenario, maxLength-scenarioHeaderLength)
+	}
+
+	if len(exampleTable.TableBody) == 0 {
+		// do not print empty examples
+		return
+	}
+
+	lastStep := len(pickleStepResults) == len(pickle.Steps)
+	if !lastStep {
+		// do not print examples unless all steps have finished
+		return
+	}
+
+	for _, result := range pickleStepResults {
+		// determine example row status
+		switch {
+		case result.Status == failed:
+			errorMsg = result.Err.Error()
+			clr = result.Status.Color()
+		case result.Status == ambiguous:
+			errorMsg = result.Err.Error()
+			clr = result.Status.Color()
+		case result.Status == undefined || result.Status == pending:
+			clr = result.Status.Color()
+		case result.Status == skipped && clr == nil:
+			clr = cyan
+		}
+
+		if firstExamplesTable && printExampleHeader {
+			// in the first example, we need to print the steps
+
+			pickleStep := f.Storage.MustGetPickleStep(result.PickleStepID)
+			astStep := feature.FindStep(pickleStep.AstNodeIds[0])
+
+			var text = ""
+			if result.Def != nil {
+				if m := outlinePlaceholderRegexp.FindAllStringIndex(astStep.Text, -1); len(m) > 0 {
+					var pos int
+					for i := 0; i < len(m); i++ {
+						pair := m[i]
+						text += cyan(astStep.Text[pos:pair[0]])
+						text += cyanb(astStep.Text[pair[0]:pair[1]])
+						pos = pair[1]
+					}
+					text += cyan(astStep.Text[pos:len(astStep.Text)])
+				} else {
+					text = cyan(astStep.Text)
+				}
+
+				_, maxLength := f.scenarioLengths(pickle)
+				stepLength := f.lengthPickleStep(astStep.Keyword, astStep.Text)
+
+				text += s(maxLength - stepLength)
+				text += " " + blackb("# "+DefinitionID(result.Def))
+			}
+
+			// print the step outline
+			fmt.Fprintln(f.out, s(f.indent*2)+cyan(strings.TrimSpace(astStep.Keyword))+" 
"+text) + + if pickleStep.Argument != nil { + if table := pickleStep.Argument.DataTable; table != nil { + f.printTable(table, cyan) + } + + if docString := astStep.DocString; docString != nil { + f.printDocString(docString) + } + } + } + } + + max := longestExampleRow(exampleTable, clr, cyan) + + // an example table header + if printExampleHeader { + fmt.Fprintln(f.out, "") + fmt.Fprintln(f.out, s(f.indent*2)+keywordAndName(exampleTable.Keyword, exampleTable.Name)) + + f.printTableHeader(exampleTable.TableHeader, max) + } + + f.printTableRow(exampleRow, max, clr) + + if errorMsg != "" { + fmt.Fprintln(f.out, s(f.indent*4)+redb(errorMsg)) + } +} + +func (f *Pretty) printTableRow(row *messages.TableRow, max []int, clr colors.ColorFunc) { + cells := make([]string, len(row.Cells)) + + for i, cell := range row.Cells { + val := clr(cell.Value) + ln := utf8.RuneCountInString(val) + cells[i] = val + s(max[i]-ln) + } + + fmt.Fprintln(f.out, s(f.indent*3)+"| "+strings.Join(cells, " | ")+" |") +} + +func (f *Pretty) printTableHeader(row *messages.TableRow, max []int) { + f.printTableRow(row, max, cyan) +} + +func isFirstScenarioInRule(rule *messages.Rule, scenario *messages.Scenario) bool { + if rule == nil || scenario == nil { + return false + } + var firstScenario *messages.Scenario + for _, c := range rule.Children { + if c.Scenario != nil { + firstScenario = c.Scenario + break + } + } + return firstScenario != nil && firstScenario.Id == scenario.Id +} + +func isFirstPickleAndNoRule(feature *models.Feature, pickle *messages.Pickle, rule *messages.Rule) bool { + if rule != nil { + return false + } + return feature.Pickles[0].Id == pickle.Id +} + +func (f *Pretty) printStep(pickle *messages.Pickle, pickleStep *messages.PickleStep) { + feature := f.Storage.MustGetFeature(pickle.Uri) + astBackground := feature.FindBackground(pickle.AstNodeIds[0]) + astScenario := feature.FindScenario(pickle.AstNodeIds[0]) + astRule := feature.FindRule(pickle.AstNodeIds[0]) + astStep := feature.FindStep(pickleStep.AstNodeIds[0]) + + var astBackgroundStep bool + var firstExecutedBackgroundStep bool + var backgroundSteps int + + if astBackground != nil { + backgroundSteps = len(astBackground.Steps) + + for idx, step := range astBackground.Steps { + if step.Id == pickleStep.AstNodeIds[0] { + astBackgroundStep = true + firstExecutedBackgroundStep = idx == 0 + break + } + } + } + + firstPickle := isFirstPickleAndNoRule(feature, pickle, astRule) || isFirstScenarioInRule(astRule, astScenario) + + if astBackgroundStep && !firstPickle { + return + } + + if astBackgroundStep && firstExecutedBackgroundStep { + fmt.Fprintln(f.out, "\n"+s(f.indent)+keywordAndName(astBackground.Keyword, astBackground.Name)) + } + + if !astBackgroundStep && len(astScenario.Examples) > 0 { + f.printOutlineExample(pickle, pickleStep, backgroundSteps) + return + } + + scenarioHeaderLength, maxLength := f.scenarioLengths(pickle) + stepLength := f.lengthPickleStep(astStep.Keyword, pickleStep.Text) + + firstExecutedScenarioStep := astScenario.Steps[0].Id == pickleStep.AstNodeIds[0] + if !astBackgroundStep && firstExecutedScenarioStep { + f.printScenarioHeader(pickle, astScenario, maxLength-scenarioHeaderLength) + } + + pickleStepResult := f.Storage.MustGetPickleStepResult(pickleStep.Id) + text := s(f.indent*2) + pickleStepResult.Status.Color()(strings.TrimSpace(astStep.Keyword)) + " " + pickleStepResult.Status.Color()(pickleStep.Text) + if pickleStepResult.Def != nil { + text += s(maxLength - stepLength + 1) + text += blackb("# " + 
DefinitionID(pickleStepResult.Def)) + } + fmt.Fprintln(f.out, text) + + if pickleStep.Argument != nil { + if table := pickleStep.Argument.DataTable; table != nil { + f.printTable(table, cyan) + } + + if docString := astStep.DocString; docString != nil { + f.printDocString(docString) + } + } + + if pickleStepResult.Err != nil { + fmt.Fprintln(f.out, s(f.indent*2)+redb(fmt.Sprintf("%+v", pickleStepResult.Err))) + } + + if pickleStepResult.Status == pending { + fmt.Fprintln(f.out, s(f.indent*3)+yellow("TODO: write pending definition")) + } +} + +func (f *Pretty) printDocString(docString *messages.DocString) { + var ct string + + if len(docString.MediaType) > 0 { + ct = " " + cyan(docString.MediaType) + } + + fmt.Fprintln(f.out, s(f.indent*3)+cyan(docString.Delimiter)+ct) + + for _, ln := range strings.Split(docString.Content, "\n") { + fmt.Fprintln(f.out, s(f.indent*3)+cyan(ln)) + } + + fmt.Fprintln(f.out, s(f.indent*3)+cyan(docString.Delimiter)) +} + +// print table with aligned table cells +// @TODO: need to make example header cells bold +func (f *Pretty) printTable(t *messages.PickleTable, c colors.ColorFunc) { + maxColLengths := maxColLengths(t, c) + var cols = make([]string, len(t.Rows[0].Cells)) + + for _, row := range t.Rows { + for i, cell := range row.Cells { + val := c(cell.Value) + colLength := utf8.RuneCountInString(val) + cols[i] = val + s(maxColLengths[i]-colLength) + } + + fmt.Fprintln(f.out, s(f.indent*3)+"| "+strings.Join(cols, " | ")+" |") + } +} + +// longest gives a list of longest columns of all rows in Table +func maxColLengths(t *messages.PickleTable, clrs ...colors.ColorFunc) []int { + if t == nil { + return []int{} + } + + longest := make([]int, len(t.Rows[0].Cells)) + for _, row := range t.Rows { + for i, cell := range row.Cells { + for _, c := range clrs { + ln := utf8.RuneCountInString(c(cell.Value)) + if longest[i] < ln { + longest[i] = ln + } + } + + ln := utf8.RuneCountInString(cell.Value) + if longest[i] < ln { + longest[i] = ln + } + } + } + + return longest +} + +func longestExampleRow(t *messages.Examples, clrs ...colors.ColorFunc) []int { + if t == nil { + return []int{} + } + + longest := make([]int, len(t.TableHeader.Cells)) + for i, cell := range t.TableHeader.Cells { + for _, c := range clrs { + ln := utf8.RuneCountInString(c(cell.Value)) + if longest[i] < ln { + longest[i] = ln + } + } + + ln := utf8.RuneCountInString(cell.Value) + if longest[i] < ln { + longest[i] = ln + } + } + + for _, row := range t.TableBody { + for i, cell := range row.Cells { + for _, c := range clrs { + ln := utf8.RuneCountInString(c(cell.Value)) + if longest[i] < ln { + longest[i] = ln + } + } + + ln := utf8.RuneCountInString(cell.Value) + if longest[i] < ln { + longest[i] = ln + } + } + } + + return longest +} + +func (f *Pretty) longestStep(steps []*messages.Step, pickleLength int) int { + max := pickleLength + + for _, step := range steps { + length := f.lengthPickleStep(step.Keyword, step.Text) + if length > max { + max = length + } + } + + return max +} + +// a line number representation in feature file +func line(path string, loc *messages.Location) string { + // Path can contain a line number already. + // This line number has to be trimmed to avoid duplication. 
+ path = strings.TrimSuffix(path, fmt.Sprintf(":%d", loc.Line)) + return " " + blackb(fmt.Sprintf("# %s:%d", path, loc.Line)) +} + +func (f *Pretty) lengthPickleStep(keyword, text string) int { + return f.indent*2 + utf8.RuneCountInString(strings.TrimSpace(keyword)+" "+text) +} + +func (f *Pretty) lengthPickle(keyword, name string) int { + return f.indent + utf8.RuneCountInString(strings.TrimSpace(keyword)+": "+name) +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/fmt_progress.go b/vendor/github.com/cucumber/godog/internal/formatters/fmt_progress.go new file mode 100644 index 000000000..9722ef7a5 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/fmt_progress.go @@ -0,0 +1,172 @@ +package formatters + +import ( + "fmt" + "io" + "math" + "sort" + "strings" + + "github.com/cucumber/godog/formatters" + messages "github.com/cucumber/messages/go/v21" +) + +func init() { + formatters.Format("progress", "Prints a character per step.", ProgressFormatterFunc) +} + +// ProgressFormatterFunc implements the FormatterFunc for the progress formatter. +func ProgressFormatterFunc(suite string, out io.Writer) formatters.Formatter { + return NewProgress(suite, out) +} + +// NewProgress creates a new progress formatter. +func NewProgress(suite string, out io.Writer) *Progress { + steps := 0 + return &Progress{ + Base: NewBase(suite, out), + StepsPerRow: 70, + Steps: &steps, + } +} + +// Progress is a minimalistic formatter. +type Progress struct { + *Base + StepsPerRow int + Steps *int +} + +// Summary renders summary information. +func (f *Progress) Summary() { + left := math.Mod(float64(*f.Steps), float64(f.StepsPerRow)) + if left != 0 { + if *f.Steps > f.StepsPerRow { + fmt.Fprintf(f.out, s(f.StepsPerRow-int(left))+fmt.Sprintf(" %d\n", *f.Steps)) + } else { + fmt.Fprintf(f.out, " %d\n", *f.Steps) + } + } + + var failedStepsOutput []string + + failedSteps := f.Storage.MustGetPickleStepResultsByStatus(failed) + sort.Sort(sortPickleStepResultsByPickleStepID(failedSteps)) + + for _, sr := range failedSteps { + if sr.Status == failed { + pickle := f.Storage.MustGetPickle(sr.PickleID) + pickleStep := f.Storage.MustGetPickleStep(sr.PickleStepID) + feature := f.Storage.MustGetFeature(pickle.Uri) + + sc := feature.FindScenario(pickle.AstNodeIds[0]) + scenarioDesc := fmt.Sprintf("%s: %s", sc.Keyword, pickle.Name) + scenarioLine := fmt.Sprintf("%s:%d", pickle.Uri, sc.Location.Line) + + step := feature.FindStep(pickleStep.AstNodeIds[0]) + stepDesc := strings.TrimSpace(step.Keyword) + " " + pickleStep.Text + stepLine := fmt.Sprintf("%s:%d", pickle.Uri, step.Location.Line) + + failedStepsOutput = append( + failedStepsOutput, + s(2)+red(scenarioDesc)+blackb(" # "+scenarioLine), + s(4)+red(stepDesc)+blackb(" # "+stepLine), + s(6)+red("Error: ")+redb(fmt.Sprintf("%+v", sr.Err)), + "", + ) + } + } + + if len(failedStepsOutput) > 0 { + fmt.Fprintln(f.out, "\n\n--- "+red("Failed steps:")+"\n") + fmt.Fprint(f.out, strings.Join(failedStepsOutput, "\n")) + } + fmt.Fprintln(f.out, "") + + f.Base.Summary() +} + +func (f *Progress) step(pickleStepID string) { + pickleStepResult := f.Storage.MustGetPickleStepResult(pickleStepID) + + switch pickleStepResult.Status { + case passed: + fmt.Fprint(f.out, green(".")) + case skipped: + fmt.Fprint(f.out, cyan("-")) + case failed: + fmt.Fprint(f.out, red("F")) + case undefined: + fmt.Fprint(f.out, yellow("U")) + case ambiguous: + fmt.Fprint(f.out, yellow("A")) + case pending: + fmt.Fprint(f.out, yellow("P")) + } + + *f.Steps++ + + if 
math.Mod(float64(*f.Steps), float64(f.StepsPerRow)) == 0 { + fmt.Fprintf(f.out, " %d\n", *f.Steps) + } +} + +// Passed captures passed step. +func (f *Progress) Passed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Passed(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} + +// Skipped captures skipped step. +func (f *Progress) Skipped(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Skipped(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} + +// Undefined captures undefined step. +func (f *Progress) Undefined(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Undefined(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} + +// Failed captures failed step. +func (f *Progress) Failed(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) { + f.Base.Failed(pickle, step, match, err) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} + +// Ambiguous steps. +func (f *Progress) Ambiguous(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition, err error) { + f.Base.Ambiguous(pickle, step, match, err) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} + +// Pending captures pending step. +func (f *Progress) Pending(pickle *messages.Pickle, step *messages.PickleStep, match *formatters.StepDefinition) { + f.Base.Pending(pickle, step, match) + + f.Lock.Lock() + defer f.Lock.Unlock() + + f.step(step.Id) +} diff --git a/vendor/github.com/cucumber/godog/internal/formatters/undefined_snippets_gen.go b/vendor/github.com/cucumber/godog/internal/formatters/undefined_snippets_gen.go new file mode 100644 index 000000000..ff6cd79ef --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/formatters/undefined_snippets_gen.go @@ -0,0 +1,108 @@ +package formatters + +import ( + "fmt" + "reflect" + "regexp" + "strings" + "text/template" + + messages "github.com/cucumber/messages/go/v21" +) + +// some snippet formatting regexps +var snippetExprCleanup = regexp.MustCompile(`([\/\[\]\(\)\\^\$\.\|\?\*\+\'])`) +var snippetExprQuoted = regexp.MustCompile(`(\W|^)"(?:[^"]*)"(\W|$)`) +var snippetMethodName = regexp.MustCompile(`[^a-zA-Z\_\ ]`) +var snippetNumbers = regexp.MustCompile(`(\d+)`) + +var snippetHelperFuncs = template.FuncMap{ + "backticked": func(s string) string { + return "`" + s + "`" + }, +} + +var undefinedSnippetsTpl = template.Must(template.New("snippets").Funcs(snippetHelperFuncs).Parse(` +{{ range . }}func {{ .Method }}({{ .Args }}) error { + return godog.ErrPending +} + +{{end}}func InitializeScenario(ctx *godog.ScenarioContext) { {{ range . 
}}
+	ctx.Step({{ backticked .Expr }}, {{ .Method }}){{end}}
+}
+`))
+
+type undefinedSnippet struct {
+	Method   string
+	Expr     string
+	argument *messages.PickleStepArgument
+}
+
+func (s undefinedSnippet) Args() (ret string) {
+	var (
+		args      []string
+		pos       int
+		breakLoop bool
+	)
+
+	for !breakLoop {
+		part := s.Expr[pos:]
+		ipos := strings.Index(part, "(\\d+)")
+		spos := strings.Index(part, "\"([^\"]*)\"")
+
+		switch {
+		case spos == -1 && ipos == -1:
+			breakLoop = true
+		case spos == -1:
+			pos += ipos + len("(\\d+)")
+			args = append(args, reflect.Int.String())
+		case ipos == -1:
+			pos += spos + len("\"([^\"]*)\"")
+			args = append(args, reflect.String.String())
+		case ipos < spos:
+			pos += ipos + len("(\\d+)")
+			args = append(args, reflect.Int.String())
+		case spos < ipos:
+			pos += spos + len("\"([^\"]*)\"")
+			args = append(args, reflect.String.String())
+		}
+	}
+
+	if s.argument != nil {
+		if s.argument.DocString != nil {
+			args = append(args, "*godog.DocString")
+		}
+
+		if s.argument.DataTable != nil {
+			args = append(args, "*godog.Table")
+		}
+	}
+
+	var last string
+
+	for i, arg := range args {
+		if last == "" || last == arg {
+			ret += fmt.Sprintf("arg%d, ", i+1)
+		} else {
+			ret = strings.TrimRight(ret, ", ") + fmt.Sprintf(" %s, arg%d, ", last, i+1)
+		}
+
+		last = arg
+	}
+
+	return strings.TrimSpace(strings.TrimRight(ret, ", ") + " " + last)
+}
+
+type snippetSortByMethod []undefinedSnippet
+
+func (s snippetSortByMethod) Len() int {
+	return len(s)
+}
+
+func (s snippetSortByMethod) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s snippetSortByMethod) Less(i, j int) bool {
+	return s[i].Method < s[j].Method
+}
diff --git a/vendor/github.com/cucumber/godog/internal/models/feature.go b/vendor/github.com/cucumber/godog/internal/models/feature.go
new file mode 100644
index 000000000..9d9d84da7
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/models/feature.go
@@ -0,0 +1,151 @@
+package models
+
+import (
+	messages "github.com/cucumber/messages/go/v21"
+)
+
+// Feature is an internal object to group together
+// the parsed gherkin document, the pickles and the
+// raw content.
+type Feature struct {
+	*messages.GherkinDocument
+	Pickles []*messages.Pickle
+	Content []byte
+}
+
+// FindRule returns the rule to which the given scenario belongs
+func (f Feature) FindRule(astScenarioID string) *messages.Rule {
+	for _, child := range f.GherkinDocument.Feature.Children {
+		if ru := child.Rule; ru != nil {
+			for _, rcc := range ru.Children {
+				if sc := rcc.Scenario; sc != nil && sc.Id == astScenarioID {
+					return ru
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// FindScenario returns the scenario in the feature or in a rule in the feature
+func (f Feature) FindScenario(astScenarioID string) *messages.Scenario {
+	for _, child := range f.GherkinDocument.Feature.Children {
+		if sc := child.Scenario; sc != nil && sc.Id == astScenarioID {
+			return sc
+		}
+		if rc := child.Rule; rc != nil {
+			for _, rcc := range rc.Children {
+				if sc := rcc.Scenario; sc != nil && sc.Id == astScenarioID {
+					return sc
+				}
+			}
+		}
+	}
+
+	return nil
+}
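`FindRule` and `FindScenario` above encode the shape of the parsed AST: a feature's children are scenarios, backgrounds, or rules, and a rule nests its own children one level deeper. A small sketch of the same traversal (illustrative only; `countScenarios` is not part of the vendored package, and it assumes the same `messages` import used by this file):

```go
package main

import (
	messages "github.com/cucumber/messages/go/v21"
)

// countScenarios walks a parsed gherkin document the same way
// Feature.FindScenario does: top-level children first, then one
// extra level under each Rule.
func countScenarios(doc *messages.GherkinDocument) int {
	n := 0
	for _, child := range doc.Feature.Children {
		if child.Scenario != nil {
			n++
		}
		if ru := child.Rule; ru != nil {
			for _, rc := range ru.Children {
				if rc.Scenario != nil {
					n++
				}
			}
		}
	}
	return n
}
```

+
+// FindBackground ...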
+func (f Feature) FindBackground(astScenarioID string) *messages.Background { + var bg *messages.Background + + for _, child := range f.GherkinDocument.Feature.Children { + if tmp := child.Background; tmp != nil { + bg = tmp + } + + if sc := child.Scenario; sc != nil && sc.Id == astScenarioID { + return bg + } + + if ru := child.Rule; ru != nil { + for _, rc := range ru.Children { + if tmp := rc.Background; tmp != nil { + bg = tmp + } + + if sc := rc.Scenario; sc != nil && sc.Id == astScenarioID { + return bg + } + } + } + } + + return nil +} + +// FindExample ... +func (f Feature) FindExample(exampleAstID string) (*messages.Examples, *messages.TableRow) { + for _, child := range f.GherkinDocument.Feature.Children { + if sc := child.Scenario; sc != nil { + for _, example := range sc.Examples { + for _, row := range example.TableBody { + if row.Id == exampleAstID { + return example, row + } + } + } + } + if ru := child.Rule; ru != nil { + for _, rc := range ru.Children { + if sc := rc.Scenario; sc != nil { + for _, example := range sc.Examples { + for _, row := range example.TableBody { + if row.Id == exampleAstID { + return example, row + } + } + } + } + } + } + } + + return nil, nil +} + +// FindStep ... +func (f Feature) FindStep(astStepID string) *messages.Step { + for _, child := range f.GherkinDocument.Feature.Children { + + if ru := child.Rule; ru != nil { + for _, ch := range ru.Children { + if sc := ch.Scenario; sc != nil { + for _, step := range sc.Steps { + if step.Id == astStepID { + return step + } + } + } + + if bg := ch.Background; bg != nil { + for _, step := range bg.Steps { + if step.Id == astStepID { + return step + } + } + } + } + } + + if sc := child.Scenario; sc != nil { + for _, step := range sc.Steps { + if step.Id == astStepID { + return step + } + } + } + + if bg := child.Background; bg != nil { + for _, step := range bg.Steps { + if step.Id == astStepID { + return step + } + } + } + } + + return nil +} diff --git a/vendor/github.com/cucumber/godog/internal/models/results.go b/vendor/github.com/cucumber/godog/internal/models/results.go new file mode 100644 index 000000000..9c7f98d7f --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/models/results.go @@ -0,0 +1,111 @@ +package models + +import ( + "time" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/internal/utils" +) + +// TestRunStarted ... +type TestRunStarted struct { + StartedAt time.Time +} + +// PickleResult ... +type PickleResult struct { + PickleID string + StartedAt time.Time +} + +// PickleAttachment ... +type PickleAttachment struct { + Name string + MimeType string + Data []byte +} + +// PickleStepResult ... +type PickleStepResult struct { + Status StepResultStatus + FinishedAt time.Time + Err error + + PickleID string + PickleStepID string + + Def *StepDefinition + + Attachments []PickleAttachment +} + +// NewStepResult ... +func NewStepResult( + status StepResultStatus, + pickleID, pickleStepID string, + match *StepDefinition, + attachments []PickleAttachment, + err error, +) PickleStepResult { + return PickleStepResult{ + Status: status, + FinishedAt: utils.TimeNowFunc(), + Err: err, + PickleID: pickleID, + PickleStepID: pickleStepID, + Def: match, + Attachments: attachments, + } +} + +// StepResultStatus ... +type StepResultStatus int + +const ( + // Passed ... + Passed StepResultStatus = iota + // Failed ... + Failed + // Skipped ... + Skipped + // Undefined ... + Undefined + // Pending ... + Pending + // Ambiguous ... + Ambiguous +) + +// Color ... 
+func (st StepResultStatus) Color() colors.ColorFunc { + switch st { + case Passed: + return colors.Green + case Failed: + return colors.Red + case Skipped: + return colors.Cyan + default: + return colors.Yellow + } +} + +// String ... +func (st StepResultStatus) String() string { + switch st { + case Passed: + return "passed" + case Failed: + return "failed" + case Skipped: + return "skipped" + case Undefined: + return "undefined" + case Pending: + return "pending" + case Ambiguous: + return "ambiguous" + default: + return "unknown" + } +} diff --git a/vendor/github.com/cucumber/godog/internal/models/stepdef.go b/vendor/github.com/cucumber/godog/internal/models/stepdef.go new file mode 100644 index 000000000..7c2e973ac --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/models/stepdef.go @@ -0,0 +1,309 @@ +package models + +import ( + "context" + "errors" + "fmt" + "reflect" + "strconv" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/formatters" +) + +var typeOfBytes = reflect.TypeOf([]byte(nil)) + +// matchable errors +var ( + ErrUnmatchedStepArgumentNumber = errors.New("func expected more arguments than given") + ErrCannotConvert = errors.New("cannot convert argument") + ErrUnsupportedParameterType = errors.New("func has unsupported parameter type") +) + +// StepDefinition ... +type StepDefinition struct { + formatters.StepDefinition + + Args []interface{} + HandlerValue reflect.Value + File string + Line int + + // multistep related + Nested bool + Undefined []string +} + +var typeOfContext = reflect.TypeOf((*context.Context)(nil)).Elem() + +// Run a step with the matched arguments using reflect +// Returns one of ... +// (context, error) +// (context, godog.Steps) +func (sd *StepDefinition) Run(ctx context.Context) (context.Context, interface{}) { + var values []reflect.Value + + typ := sd.HandlerValue.Type() + numIn := typ.NumIn() + hasCtxIn := numIn > 0 && typ.In(0).Implements(typeOfContext) + ctxOffset := 0 + + if hasCtxIn { + values = append(values, reflect.ValueOf(ctx)) + ctxOffset = 1 + numIn-- + } + + if len(sd.Args) < numIn { + return ctx, fmt.Errorf("%w: expected %d arguments, matched %d from step", ErrUnmatchedStepArgumentNumber, numIn, len(sd.Args)) + } + + for i := 0; i < numIn; i++ { + param := typ.In(i + ctxOffset) + switch param.Kind() { + case reflect.Int: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseInt(s, 10, 0) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to int: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(int(v))) + case reflect.Int64: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to int64: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(v)) + case reflect.Int32: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to int32: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(int32(v))) + case reflect.Int16: + s, err := sd.shouldBeString(i) + if err != nil { + return ctx, err + } + v, err := strconv.ParseInt(s, 10, 16) + if err != nil { + return ctx, fmt.Errorf(`%w %d: "%s" to int16: %s`, ErrCannotConvert, i, s, err) + } + values = append(values, reflect.ValueOf(int16(v))) + case reflect.Int8: + s, err := sd.shouldBeString(i) + if 
err != nil {
+			return ctx, err
+		}
+		v, err := strconv.ParseInt(s, 10, 8)
+		if err != nil {
+			return ctx, fmt.Errorf(`%w %d: "%s" to int8: %s`, ErrCannotConvert, i, s, err)
+		}
+		values = append(values, reflect.ValueOf(int8(v)))
+	case reflect.Uint:
+		s, err := sd.shouldBeString(i)
+		if err != nil {
+			return ctx, err
+		}
+		v, err := strconv.ParseUint(s, 10, 0)
+		if err != nil {
+			return ctx, fmt.Errorf(`%w %d: "%s" to uint: %s`, ErrCannotConvert, i, s, err)
+		}
+		values = append(values, reflect.ValueOf(uint(v)))
+	case reflect.Uint64:
+		s, err := sd.shouldBeString(i)
+		if err != nil {
+			return ctx, err
+		}
+		v, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return ctx, fmt.Errorf(`%w %d: "%s" to uint64: %s`, ErrCannotConvert, i, s, err)
+		}
+		values = append(values, reflect.ValueOf(v))
+	case reflect.Uint32:
+		s, err := sd.shouldBeString(i)
+		if err != nil {
+			return ctx, err
+		}
+		v, err := strconv.ParseUint(s, 10, 32)
+		if err != nil {
+			return ctx, fmt.Errorf(`%w %d: "%s" to uint32: %s`, ErrCannotConvert, i, s, err)
+		}
+		values = append(values, reflect.ValueOf(uint32(v)))
+	case reflect.Uint16:
+		s, err := sd.shouldBeString(i)
+		if err != nil {
+			return ctx, err
+		}
+		v, err := strconv.ParseUint(s, 10, 16)
+		if err != nil {
+			return ctx, fmt.Errorf(`%w %d: "%s" to uint16: %s`, ErrCannotConvert, i, s, err)
+		}
+		values = append(values, reflect.ValueOf(uint16(v)))
+	case reflect.Uint8:
+		s, err := sd.shouldBeString(i)
+		if err != nil {
+			return ctx, err
+		}
+		v, err := strconv.ParseUint(s, 10, 8)
+		if err != nil {
+			return ctx, fmt.Errorf(`%w %d: "%s" to uint8: %s`, ErrCannotConvert, i, s, err)
+		}
+		values = append(values, reflect.ValueOf(uint8(v)))
+	case reflect.String:
+		s, err := sd.shouldBeString(i)
+		if err != nil {
+			return ctx, err
+		}
+		values = append(values, reflect.ValueOf(s))
+	case reflect.Float64:
+		s, err := sd.shouldBeString(i)
+		if err != nil {
+			return ctx, err
+		}
+		v, err := strconv.ParseFloat(s, 64)
+		if err != nil {
+			return ctx, fmt.Errorf(`%w %d: "%s" to float64: %s`, ErrCannotConvert, i, s, err)
+		}
+		values = append(values, reflect.ValueOf(v))
+	case reflect.Float32:
+		s, err := sd.shouldBeString(i)
+		if err != nil {
+			return ctx, err
+		}
+		v, err := strconv.ParseFloat(s, 32)
+		if err != nil {
+			return ctx, fmt.Errorf(`%w %d: "%s" to float32: %s`, ErrCannotConvert, i, s, err)
+		}
+		values = append(values, reflect.ValueOf(float32(v)))
+	case reflect.Ptr:
+		arg := sd.Args[i]
+		switch param.Elem().String() {
+		case "messages.PickleDocString":
+			if v, ok := arg.(*messages.PickleStepArgument); ok {
+				values = append(values, reflect.ValueOf(v.DocString))
+				break
+			}
+
+			if v, ok := arg.(*messages.PickleDocString); ok {
+				values = append(values, reflect.ValueOf(v))
+				break
+			}
+
+			return ctx, fmt.Errorf(`%w %d: "%v" of type "%T" to *messages.PickleDocString`, ErrCannotConvert, i, arg, arg)
+		case "messages.PickleTable":
+			if v, ok := arg.(*messages.PickleStepArgument); ok {
+				values = append(values, reflect.ValueOf(v.DataTable))
+				break
+			}
+
+			if v, ok := arg.(*messages.PickleTable); ok {
+				values = append(values, reflect.ValueOf(v))
+				break
+			}
+
+			return ctx, fmt.Errorf(`%w %d: "%v" of type "%T" to *messages.PickleTable`, ErrCannotConvert, i, arg, arg)
+		default:
+			// the error here is that the declared function has an unsupported param type - really this ought to be trapped at registration time
+			return ctx, fmt.Errorf("%w: the data type of parameter %d type *%s is not supported", ErrUnsupportedParameterType, i, param.Elem().String())
+		}
+	case 
reflect.Slice:
+		switch param {
+		case typeOfBytes:
+			s, err := sd.shouldBeString(i)
+			if err != nil {
+				return ctx, err
+			}
+			values = append(values, reflect.ValueOf([]byte(s)))
+		default:
+			// the problem is the function decl is not using a supported slice type as the param
+			return ctx, fmt.Errorf("%w: the slice parameter %d type []%s is not supported", ErrUnsupportedParameterType, i, param.Elem().Kind())
+		}
+	case reflect.Struct:
+		return ctx, fmt.Errorf("%w: the struct parameter %d type %s is not supported", ErrUnsupportedParameterType, i, param.String())
+	default:
+		return ctx, fmt.Errorf("%w: the parameter %d type %s is not supported", ErrUnsupportedParameterType, i, param.Kind())
+	}
+	}
+
+	res := sd.HandlerValue.Call(values)
+	if len(res) == 0 {
+		return ctx, nil
+	}
+
+	// Note that the step fn return types were validated at Initialise in test_context.go stepWithKeyword()
+
+	// single return value may be one of ...
+	// error
+	// context.Context
+	// godog.Steps
+	result0 := res[0].Interface()
+	if len(res) == 1 {
+
+		// if the single return value is a context then just return it
+		if ctx, ok := result0.(context.Context); ok {
+			return ctx, nil
+		}
+
+		// return type is presumably one of nil, "error" or "Steps" so place it into second return position
+		return ctx, result0
+	}
+
+	// a multi-value return must be
+	// (context, error) and the context value must not be nil
+	if ctx, ok := result0.(context.Context); ok {
+		return ctx, res[1].Interface()
+	}
+
+	result1 := res[1].Interface()
+	errMsg := ""
+	if result1 != nil {
+		errMsg = fmt.Sprintf(", step def also returned an error: %v", result1)
+	}
+
+	text := sd.StepDefinition.Expr.String()
+
+	if result0 == nil {
+		panic(fmt.Sprintf("step definition '%v' with return type (context.Context, error) must not return nil for the context.Context value%s", text, errMsg))
+	}
+
+	panic(fmt.Errorf("step definition '%v' has return type (context.Context, error), but found %v rather than a context.Context value%s", text, result0, errMsg))
+}
+
+func (sd *StepDefinition) shouldBeString(idx int) (string, error) {
+	arg := sd.Args[idx]
+	switch arg := arg.(type) {
+	case string:
+		return arg, nil
+	case *messages.PickleStepArgument:
+		if arg.DocString == nil {
+			return "", fmt.Errorf(`%w %d: "%v" of type "%T": DocString is not set`, ErrCannotConvert, idx, arg, arg)
+		}
+		return arg.DocString.Content, nil
+	case *messages.PickleDocString:
+		return arg.Content, nil
+	default:
+		return "", fmt.Errorf(`%w %d: "%v" of type "%T" to string`, ErrCannotConvert, idx, arg, arg)
+	}
+}
+
+// GetInternalStepDefinition ...
+func (sd *StepDefinition) GetInternalStepDefinition() *formatters.StepDefinition {
+	if sd == nil {
+		return nil
+	}
+
+	return &sd.StepDefinition
+}
diff --git a/vendor/github.com/cucumber/godog/internal/parser/parser.go b/vendor/github.com/cucumber/godog/internal/parser/parser.go
new file mode 100644
index 000000000..f607000aa
--- /dev/null
+++ b/vendor/github.com/cucumber/godog/internal/parser/parser.go
@@ -0,0 +1,243 @@
+package parser
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+
+	gherkin "github.com/cucumber/gherkin/go/v26"
+	messages "github.com/cucumber/messages/go/v21"
+
+	"github.com/cucumber/godog/internal/flags"
+	"github.com/cucumber/godog/internal/models"
+	"github.com/cucumber/godog/internal/tags"
+)
+
+var pathLineRe = regexp.MustCompile(`:([\d]+)$`)
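A stripped-down sketch of what `StepDefinition.Run` does above: take the regexp captures as strings, convert each one to the handler's declared parameter type, and invoke the handler via reflect. The names here (`call`, `step`) are hypothetical, and the real implementation also handles contexts, doc strings, and tables:

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// call converts each string capture to the handler's declared
// parameter type, then invokes the handler, which is assumed to
// return a single error value.
func call(handler interface{}, captures ...string) error {
	hv := reflect.ValueOf(handler)
	ht := hv.Type()

	values := make([]reflect.Value, ht.NumIn())
	for i := range values {
		switch ht.In(i).Kind() {
		case reflect.Int:
			n, err := strconv.Atoi(captures[i])
			if err != nil {
				return fmt.Errorf("cannot convert argument %d: %w", i, err)
			}
			values[i] = reflect.ValueOf(n)
		case reflect.String:
			values[i] = reflect.ValueOf(captures[i])
		default:
			return fmt.Errorf("unsupported parameter type %s", ht.In(i))
		}
	}

	if err, ok := hv.Call(values)[0].Interface().(error); ok && err != nil {
		return err
	}
	return nil
}

func main() {
	step := func(count int, item string) error {
		fmt.Printf("eating %d %s\n", count, item)
		return nil
	}
	_ = call(step, "3", "cucumbers")
}
```

+
+// ExtractFeaturePathLine ...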
+func ExtractFeaturePathLine(p string) (string, int) { + line := -1 + retPath := p + if m := pathLineRe.FindStringSubmatch(p); len(m) > 0 { + if i, err := strconv.Atoi(m[1]); err == nil { + line = i + retPath = p[:strings.LastIndexByte(p, ':')] + } + } + return retPath, line +} + +func parseFeatureFile(fsys fs.FS, path, dialect string, newIDFunc func() string) (*models.Feature, error) { + reader, err := fsys.Open(path) + if err != nil { + return nil, err + } + + defer reader.Close() + + var buf bytes.Buffer + gherkinDocument, err := gherkin.ParseGherkinDocumentForLanguage(io.TeeReader(reader, &buf), dialect, newIDFunc) + if err != nil { + return nil, fmt.Errorf("%s - %v", path, err) + } + + gherkinDocument.Uri = path + pickles := gherkin.Pickles(*gherkinDocument, path, newIDFunc) + + f := models.Feature{GherkinDocument: gherkinDocument, Pickles: pickles, Content: buf.Bytes()} + return &f, nil +} + +func parseBytes(path string, feature []byte, dialect string, newIDFunc func() string) (*models.Feature, error) { + reader := bytes.NewReader(feature) + + var buf bytes.Buffer + gherkinDocument, err := gherkin.ParseGherkinDocumentForLanguage(io.TeeReader(reader, &buf), dialect, newIDFunc) + if err != nil { + return nil, fmt.Errorf("%s - %v", path, err) + } + + gherkinDocument.Uri = path + pickles := gherkin.Pickles(*gherkinDocument, path, newIDFunc) + + f := models.Feature{GherkinDocument: gherkinDocument, Pickles: pickles, Content: buf.Bytes()} + return &f, nil +} + +func parseFeatureDir(fsys fs.FS, dir, dialect string, newIDFunc func() string) ([]*models.Feature, error) { + var features []*models.Feature + return features, fs.WalkDir(fsys, dir, func(p string, f fs.DirEntry, err error) error { + if err != nil { + return err + } + + if f.IsDir() { + return nil + } + + if !strings.HasSuffix(p, ".feature") { + return nil + } + + feat, err := parseFeatureFile(fsys, p, dialect, newIDFunc) + if err != nil { + return err + } + + features = append(features, feat) + return nil + }) +} + +func parsePath(fsys fs.FS, path, dialect string, newIDFunc func() string) ([]*models.Feature, error) { + var features []*models.Feature + + path, line := ExtractFeaturePathLine(path) + + fi, err := func() (fs.FileInfo, error) { + file, err := fsys.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + return file.Stat() + }() + if err != nil { + return features, err + } + + if fi.IsDir() { + return parseFeatureDir(fsys, path, dialect, newIDFunc) + } + + ft, err := parseFeatureFile(fsys, path, dialect, newIDFunc) + if err != nil { + return features, err + } + + // filter scenario by line number + var pickles []*messages.Pickle + + if line != -1 { + ft.Uri += ":" + strconv.Itoa(line) + } + + for _, pickle := range ft.Pickles { + sc := ft.FindScenario(pickle.AstNodeIds[0]) + + if line == -1 || int64(line) == sc.Location.Line { + if line != -1 { + pickle.Uri += ":" + strconv.Itoa(line) + } + + pickles = append(pickles, pickle) + } + } + ft.Pickles = pickles + + return append(features, ft), nil +} + +// ParseFeatures ... 
+func ParseFeatures(fsys fs.FS, filter, dialect string, paths []string) ([]*models.Feature, error) { + var order int + + if dialect == "" { + dialect = gherkin.DefaultDialect + } + + featureIdxs := make(map[string]int) + uniqueFeatureURI := make(map[string]*models.Feature) + newIDFunc := (&messages.Incrementing{}).NewId + for _, path := range paths { + feats, err := parsePath(fsys, path, dialect, newIDFunc) + + switch { + case os.IsNotExist(err): + return nil, fmt.Errorf(`feature path "%s" is not available`, path) + case os.IsPermission(err): + return nil, fmt.Errorf(`feature path "%s" is not accessible`, path) + case err != nil: + return nil, err + } + + for _, ft := range feats { + if _, duplicate := uniqueFeatureURI[ft.Uri]; duplicate { + continue + } + + uniqueFeatureURI[ft.Uri] = ft + featureIdxs[ft.Uri] = order + + order++ + } + } + + var features = make([]*models.Feature, len(uniqueFeatureURI)) + for uri, feature := range uniqueFeatureURI { + idx := featureIdxs[uri] + features[idx] = feature + } + + features = filterFeatures(filter, features) + + return features, nil +} + +type FeatureContent = flags.Feature + +func ParseFromBytes(filter, dialect string, featuresInputs []FeatureContent) ([]*models.Feature, error) { + var order int + + if dialect == "" { + dialect = gherkin.DefaultDialect + } + + featureIdxs := make(map[string]int) + uniqueFeatureURI := make(map[string]*models.Feature) + newIDFunc := (&messages.Incrementing{}).NewId + for _, f := range featuresInputs { + ft, err := parseBytes(f.Name, f.Contents, dialect, newIDFunc) + if err != nil { + return nil, err + } + + if _, duplicate := uniqueFeatureURI[ft.Uri]; duplicate { + continue + } + + uniqueFeatureURI[ft.Uri] = ft + featureIdxs[ft.Uri] = order + + order++ + } + + var features = make([]*models.Feature, len(uniqueFeatureURI)) + for uri, feature := range uniqueFeatureURI { + idx := featureIdxs[uri] + features[idx] = feature + } + + features = filterFeatures(filter, features) + + return features, nil +} + +func filterFeatures(filter string, features []*models.Feature) (result []*models.Feature) { + for _, ft := range features { + ft.Pickles = tags.ApplyTagFilter(filter, ft.Pickles) + + if ft.Feature != nil && len(ft.Pickles) > 0 { + result = append(result, ft) + } + } + + return +} diff --git a/vendor/github.com/cucumber/godog/internal/storage/fs.go b/vendor/github.com/cucumber/godog/internal/storage/fs.go new file mode 100644 index 000000000..333c61def --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/storage/fs.go @@ -0,0 +1,21 @@ +package storage + +import ( + "io/fs" + "os" +) + +// FS is a wrapper that falls back to `os`. +type FS struct { + FS fs.FS +} + +// Open a file in the provided `fs.FS`. 
If none provided, +// open via `os.Open` +func (f FS) Open(name string) (fs.File, error) { + if f.FS == nil { + return os.Open(name) + } + + return f.FS.Open(name) +} diff --git a/vendor/github.com/cucumber/godog/internal/storage/storage.go b/vendor/github.com/cucumber/godog/internal/storage/storage.go new file mode 100644 index 000000000..72b7e86f7 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/storage/storage.go @@ -0,0 +1,338 @@ +package storage + +import ( + "fmt" + "sync" + + messages "github.com/cucumber/messages/go/v21" + "github.com/hashicorp/go-memdb" + + "github.com/cucumber/godog/internal/models" +) + +const ( + writeMode bool = true + readMode bool = false + + tableFeature string = "feature" + tableFeatureIndexURI string = "id" + + tablePickle string = "pickle" + tablePickleIndexID string = "id" + tablePickleIndexURI string = "uri" + + tablePickleStep string = "pickle_step" + tablePickleStepIndexID string = "id" + + tablePickleResult string = "pickle_result" + tablePickleResultIndexPickleID string = "id" + + tablePickleStepResult string = "pickle_step_result" + tablePickleStepResultIndexPickleStepID string = "id" + tablePickleStepResultIndexPickleID string = "pickle_id" + tablePickleStepResultIndexStatus string = "status" + + tableStepDefintionMatch string = "step_defintion_match" + tableStepDefintionMatchIndexStepID string = "id" +) + +// Storage is a thread safe in-mem storage +type Storage struct { + db *memdb.MemDB + + testRunStarted models.TestRunStarted + testRunStartedLock *sync.Mutex +} + +// NewStorage will create an in-mem storage that +// is used across concurrent runners and formatters +func NewStorage() *Storage { + schema := memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + tableFeature: { + Name: tableFeature, + Indexes: map[string]*memdb.IndexSchema{ + tableFeatureIndexURI: { + Name: tableFeatureIndexURI, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "Uri"}, + }, + }, + }, + tablePickle: { + Name: tablePickle, + Indexes: map[string]*memdb.IndexSchema{ + tablePickleIndexID: { + Name: tablePickleIndexID, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "Id"}, + }, + tablePickleIndexURI: { + Name: tablePickleIndexURI, + Unique: false, + Indexer: &memdb.StringFieldIndex{Field: "Uri"}, + }, + }, + }, + tablePickleStep: { + Name: tablePickleStep, + Indexes: map[string]*memdb.IndexSchema{ + tablePickleStepIndexID: { + Name: tablePickleStepIndexID, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "Id"}, + }, + }, + }, + tablePickleResult: { + Name: tablePickleResult, + Indexes: map[string]*memdb.IndexSchema{ + tablePickleResultIndexPickleID: { + Name: tablePickleResultIndexPickleID, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "PickleID"}, + }, + }, + }, + tablePickleStepResult: { + Name: tablePickleStepResult, + Indexes: map[string]*memdb.IndexSchema{ + tablePickleStepResultIndexPickleStepID: { + Name: tablePickleStepResultIndexPickleStepID, + Unique: true, + Indexer: &memdb.StringFieldIndex{Field: "PickleStepID"}, + }, + tablePickleStepResultIndexPickleID: { + Name: tablePickleStepResultIndexPickleID, + Unique: false, + Indexer: &memdb.StringFieldIndex{Field: "PickleID"}, + }, + tablePickleStepResultIndexStatus: { + Name: tablePickleStepResultIndexStatus, + Unique: false, + Indexer: &memdb.IntFieldIndex{Field: "Status"}, + }, + }, + }, + tableStepDefintionMatch: { + Name: tableStepDefintionMatch, + Indexes: map[string]*memdb.IndexSchema{ + tableStepDefintionMatchIndexStepID: { + Name: 
tableStepDefintionMatchIndexStepID,
+						Unique:  true,
+						Indexer: &memdb.StringFieldIndex{Field: "StepID"},
+					},
+				},
+			},
+		},
+	}
+
+	db, err := memdb.NewMemDB(&schema)
+	if err != nil {
+		panic(err)
+	}
+
+	return &Storage{db: db, testRunStartedLock: new(sync.Mutex)}
+}
+
+// MustInsertPickle will insert a pickle and its steps,
+// will panic on error.
+func (s *Storage) MustInsertPickle(p *messages.Pickle) {
+	txn := s.db.Txn(writeMode)
+
+	if err := txn.Insert(tablePickle, p); err != nil {
+		panic(err)
+	}
+
+	for _, step := range p.Steps {
+		if err := txn.Insert(tablePickleStep, step); err != nil {
+			panic(err)
+		}
+	}
+
+	txn.Commit()
+}
+
+// MustGetPickle will retrieve a pickle by id and panic on error.
+func (s *Storage) MustGetPickle(id string) *messages.Pickle {
+	v := s.mustFirst(tablePickle, tablePickleIndexID, id)
+	return v.(*messages.Pickle)
+}
+
+// MustGetPickles will retrieve pickles by URI and panic on error.
+func (s *Storage) MustGetPickles(uri string) (ps []*messages.Pickle) {
+	it := s.mustGet(tablePickle, tablePickleIndexURI, uri)
+	for v := it.Next(); v != nil; v = it.Next() {
+		ps = append(ps, v.(*messages.Pickle))
+	}
+
+	return
+}
+
+// MustGetPickleStep will retrieve a pickle step and panic on error.
+func (s *Storage) MustGetPickleStep(id string) *messages.PickleStep {
+	v := s.mustFirst(tablePickleStep, tablePickleStepIndexID, id)
+	return v.(*messages.PickleStep)
+}
+
+// MustInsertTestRunStarted will set the test run started event and panic on error.
+func (s *Storage) MustInsertTestRunStarted(trs models.TestRunStarted) {
+	s.testRunStartedLock.Lock()
+	defer s.testRunStartedLock.Unlock()
+
+	s.testRunStarted = trs
+}
+
+// MustGetTestRunStarted will retrieve the test run started event and panic on error.
+func (s *Storage) MustGetTestRunStarted() models.TestRunStarted {
+	s.testRunStartedLock.Lock()
+	defer s.testRunStartedLock.Unlock()
+
+	return s.testRunStarted
+}
+
+// MustInsertPickleResult will insert a pickle result and panic on error.
+func (s *Storage) MustInsertPickleResult(pr models.PickleResult) {
+	s.mustInsert(tablePickleResult, pr)
+}
+
+// MustInsertPickleStepResult will insert a pickle step result and panic on error.
+func (s *Storage) MustInsertPickleStepResult(psr models.PickleStepResult) {
+	s.mustInsert(tablePickleStepResult, psr)
+}
+
+// MustGetPickleResult will retrieve a pickle result by id and panic on error.
+func (s *Storage) MustGetPickleResult(id string) models.PickleResult {
+	v := s.mustFirst(tablePickleResult, tablePickleResultIndexPickleID, id)
+	return v.(models.PickleResult)
+}
+
+// MustGetPickleResults will retrieve all pickle results and panic on error.
+func (s *Storage) MustGetPickleResults() (prs []models.PickleResult) {
+	it := s.mustGet(tablePickleResult, tablePickleResultIndexPickleID)
+	for v := it.Next(); v != nil; v = it.Next() {
+		prs = append(prs, v.(models.PickleResult))
+	}
+
+	return prs
+}
+
+// MustGetPickleStepResult will retrieve a pickle step result by id and panic on error.
+func (s *Storage) MustGetPickleStepResult(id string) models.PickleStepResult {
+	v := s.mustFirst(tablePickleStepResult, tablePickleStepResultIndexPickleStepID, id)
+	return v.(models.PickleStepResult)
+}
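The storage layer is a thin wrapper over hashicorp/go-memdb: writers open a write transaction and commit, readers open a read transaction and look rows up through the named indexes declared in the schema. A self-contained sketch of that pattern, using a hypothetical `result` table rather than godog's actual schema:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-memdb"
)

type result struct {
	ID     string
	Status string
}

func main() {
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"result": {
				Name: "result",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"},
					},
					"status": {
						Name:    "status",
						Indexer: &memdb.StringFieldIndex{Field: "Status"},
					},
				},
			},
		},
	}

	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	// Write transaction: insert and commit, mirroring Storage.mustInsert.
	txn := db.Txn(true)
	if err := txn.Insert("result", result{ID: "step-1", Status: "passed"}); err != nil {
		panic(err)
	}
	txn.Commit()

	// Read transaction: point lookup by unique index, mirroring mustFirst.
	read := db.Txn(false)
	defer read.Abort()

	v, err := read.First("result", "id", "step-1")
	if err != nil || v == nil {
		panic(fmt.Sprintf("lookup failed: %v", err))
	}
	fmt.Println(v.(result).Status)
}
```

+
+// MustGetPickleStepResultsByPickleID will retrieve pickle step results by pickle id and panic on error.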
+func (s *Storage) MustGetPickleStepResultsByPickleID(pickleID string) (psrs []models.PickleStepResult) {
+	it := s.mustGet(tablePickleStepResult, tablePickleStepResultIndexPickleID, pickleID)
+	for v := it.Next(); v != nil; v = it.Next() {
+		psrs = append(psrs, v.(models.PickleStepResult))
+	}
+
+	return psrs
+}
+
+// MustGetPickleStepResultsByPickleIDUntilStep will retrieve pickle step results by pickle id
+// from 0..stepID for that pickle.
+func (s *Storage) MustGetPickleStepResultsByPickleIDUntilStep(pickleID string, untilStepID string) (psrs []models.PickleStepResult) {
+	it := s.mustGet(tablePickleStepResult, tablePickleStepResultIndexPickleID, pickleID)
+	for v := it.Next(); v != nil; v = it.Next() {
+		psr := v.(models.PickleStepResult)
+		psrs = append(psrs, psr)
+		if psr.PickleStepID == untilStepID {
+			break
+		}
+	}
+
+	return psrs
+}
+
+// MustGetPickleStepResultsByStatus will retrieve pickle step results by status and panic on error.
+func (s *Storage) MustGetPickleStepResultsByStatus(status models.StepResultStatus) (psrs []models.PickleStepResult) {
+	it := s.mustGet(tablePickleStepResult, tablePickleStepResultIndexStatus, status)
+	for v := it.Next(); v != nil; v = it.Next() {
+		psrs = append(psrs, v.(models.PickleStepResult))
+	}
+
+	return psrs
+}
+
+// MustInsertFeature will insert a feature and panic on error.
+func (s *Storage) MustInsertFeature(f *models.Feature) {
+	s.mustInsert(tableFeature, f)
+}
+
+// MustGetFeature will retrieve a feature by URI and panic on error.
+func (s *Storage) MustGetFeature(uri string) *models.Feature {
+	v := s.mustFirst(tableFeature, tableFeatureIndexURI, uri)
+	return v.(*models.Feature)
+}
+
+// MustGetFeatures will retrieve all features and panic on error.
+func (s *Storage) MustGetFeatures() (fs []*models.Feature) {
+	it := s.mustGet(tableFeature, tableFeatureIndexURI)
+	for v := it.Next(); v != nil; v = it.Next() {
+		fs = append(fs, v.(*models.Feature))
+	}
+
+	return
+}
+
+type stepDefinitionMatch struct {
+	StepID         string
+	StepDefinition *models.StepDefinition
+}
+
+// MustInsertStepDefintionMatch will insert the matched StepDefinition for the step ID and panic on error.
+func (s *Storage) MustInsertStepDefintionMatch(stepID string, match *models.StepDefinition) {
+	d := stepDefinitionMatch{
+		StepID:         stepID,
+		StepDefinition: match,
+	}
+
+	s.mustInsert(tableStepDefintionMatch, d)
+}
+
+// MustGetStepDefintionMatch will retrieve the matched StepDefinition for the step ID and panic on error.
+func (s *Storage) MustGetStepDefintionMatch(stepID string) *models.StepDefinition {
+	v := s.mustFirst(tableStepDefintionMatch, tableStepDefintionMatchIndexStepID, stepID)
+	return v.(stepDefinitionMatch).StepDefinition
+}
+
+func (s *Storage) mustInsert(table string, obj interface{}) {
+	txn := s.db.Txn(writeMode)
+
+	if err := txn.Insert(table, obj); err != nil {
+		panic(err)
+	}
+
+	txn.Commit()
+}
+
+func (s *Storage) mustFirst(table, index string, args ...interface{}) interface{} {
+	txn := s.db.Txn(readMode)
+	defer txn.Abort()
+
+	v, err := txn.First(table, index, args...)
+	if err != nil {
+		panic(err)
+	} else if v == nil {
+		err = fmt.Errorf("couldn't find index: %q in table: %q with args: %+v", index, table, args)
+		panic(err)
+	}
+
+	return v
+}
+
+func (s *Storage) mustGet(table, index string, args ...interface{}) memdb.ResultIterator {
+	txn := s.db.Txn(readMode)
+	defer txn.Abort()
+
+	it, err := txn.Get(table, index, args...)
+ if err != nil { + panic(err) + } + + return it +} diff --git a/vendor/github.com/cucumber/godog/internal/tags/tag_filter.go b/vendor/github.com/cucumber/godog/internal/tags/tag_filter.go new file mode 100644 index 000000000..72b4512b4 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/tags/tag_filter.go @@ -0,0 +1,62 @@ +package tags + +import ( + "strings" + + messages "github.com/cucumber/messages/go/v21" +) + +// ApplyTagFilter will apply a filter string on the +// array of pickles and return the filtered list. +// For example, "@wip && ~@slow" keeps pickles tagged @wip and not @slow; +// a comma separates alternatives (logical OR). +func ApplyTagFilter(filter string, pickles []*messages.Pickle) []*messages.Pickle { + if filter == "" { + return pickles + } + + var result = []*messages.Pickle{} + + for _, pickle := range pickles { + if match(filter, pickle.Tags) { + result = append(result, pickle) + } + } + + return result +} + +// Based on http://behat.readthedocs.org/en/v2.5/guides/6.cli.html#gherkin-filters +func match(filter string, tags []*messages.PickleTag) (ok bool) { + ok = true + + for _, andTags := range strings.Split(filter, "&&") { + var okComma bool + + for _, tag := range strings.Split(andTags, ",") { + tag = strings.TrimSpace(tag) + tag = strings.Replace(tag, "@", "", -1) + + okComma = contains(tags, tag) || okComma + + if tag[0] == '~' { + tag = tag[1:] + okComma = !contains(tags, tag) || okComma + } + } + + ok = ok && okComma + } + + return +} + +func contains(tags []*messages.PickleTag, tag string) bool { + for _, t := range tags { + tagName := strings.Replace(t.Name, "@", "", -1) + + if tagName == tag { + return true + } + } + + return false +} diff --git a/vendor/github.com/cucumber/godog/internal/utils/utils.go b/vendor/github.com/cucumber/godog/internal/utils/utils.go new file mode 100644 index 000000000..f1ec21f95 --- /dev/null +++ b/vendor/github.com/cucumber/godog/internal/utils/utils.go @@ -0,0 +1,21 @@ +package utils + +import ( + "strings" + "time" +) + +// S repeats a space n times +func S(n int) string { + if n < 0 { + n = 1 + } + return strings.Repeat(" ", n) +} + +// TimeNowFunc is a utility function to simplify testing +// by allowing TimeNowFunc to be redefined (e.g. to a fixed time) +// to remove the time domain from tests +var TimeNowFunc = func() time.Time { + return time.Now() +} diff --git a/vendor/github.com/cucumber/godog/logo.png b/vendor/github.com/cucumber/godog/logo.png new file mode 100644 index 000000000..70e6c7aa8 Binary files /dev/null and b/vendor/github.com/cucumber/godog/logo.png differ diff --git a/vendor/github.com/cucumber/godog/logo.svg b/vendor/github.com/cucumber/godog/logo.svg new file mode 100644 index 000000000..bfda7fdb1 --- /dev/null +++ b/vendor/github.com/cucumber/godog/logo.svg @@ -0,0 +1,79 @@ + [79 lines of SVG markup; element content not preserved in this excerpt] diff --git a/vendor/github.com/cucumber/godog/mod_version.go b/vendor/github.com/cucumber/godog/mod_version.go new file mode 100644 index 000000000..c915e1627 --- /dev/null +++ b/vendor/github.com/cucumber/godog/mod_version.go @@ -0,0 +1,16 @@ +//go:build go1.12 +// +build go1.12 + +package godog + +import ( + "runtime/debug" +) + +func init() { + if info, available := debug.ReadBuildInfo(); available { + if Version == "v0.0.0-dev" && info.Main.Version != "(devel)" { + Version = info.Main.Version + } + } +} diff --git a/vendor/github.com/cucumber/godog/options.go b/vendor/github.com/cucumber/godog/options.go new file mode 100644 index 000000000..2b32cfd8f --- /dev/null +++ b/vendor/github.com/cucumber/godog/options.go @@ -0,0 +1,12 @@ +package godog + +import
"github.com/cucumber/godog/internal/flags" + +// Options are suite run options +// flags are mapped to these options. +// +// It can also be used together with godog.RunWithOptions +// to run test suite from go source directly +// +// See the flags for more details +type Options = flags.Options diff --git a/vendor/github.com/cucumber/godog/run.go b/vendor/github.com/cucumber/godog/run.go new file mode 100644 index 000000000..1231d0286 --- /dev/null +++ b/vendor/github.com/cucumber/godog/run.go @@ -0,0 +1,409 @@ +package godog + +import ( + "context" + "flag" + "fmt" + "go/build" + "io" + "io/fs" + "math/rand" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "testing" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/colors" + "github.com/cucumber/godog/formatters" + ifmt "github.com/cucumber/godog/internal/formatters" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/parser" + "github.com/cucumber/godog/internal/storage" + "github.com/cucumber/godog/internal/utils" +) + +const ( + exitSuccess int = iota + exitFailure + exitOptionError +) + +type ( + testSuiteInitializer func(*TestSuiteContext) + scenarioInitializer func(*ScenarioContext) +) + +type runner struct { + randomSeed int64 + stopOnFailure, strict bool + + defaultContext context.Context + testingT *testing.T + + features []*models.Feature + + testSuiteInitializer testSuiteInitializer + scenarioInitializer scenarioInitializer + + storage *storage.Storage + fmt Formatter +} + +func (r *runner) concurrent(rate int) (failed bool) { + var copyLock sync.Mutex + + if fmt, ok := r.fmt.(storageFormatter); ok { + fmt.SetStorage(r.storage) + } + + testSuiteContext := TestSuiteContext{ + suite: &suite{ + fmt: r.fmt, + randomSeed: r.randomSeed, + strict: r.strict, + storage: r.storage, + defaultContext: r.defaultContext, + testingT: r.testingT, + }, + } + if r.testSuiteInitializer != nil { + r.testSuiteInitializer(&testSuiteContext) + } + + testRunStarted := models.TestRunStarted{StartedAt: utils.TimeNowFunc()} + r.storage.MustInsertTestRunStarted(testRunStarted) + r.fmt.TestRunStarted() + + // run before suite handlers + for _, f := range testSuiteContext.beforeSuiteHandlers { + f() + } + + queue := make(chan int, rate) + for _, ft := range r.features { + pickles := make([]*messages.Pickle, len(ft.Pickles)) + if r.randomSeed != 0 { + r := rand.New(rand.NewSource(r.randomSeed)) + perm := r.Perm(len(ft.Pickles)) + for i, v := range perm { + pickles[v] = ft.Pickles[i] + } + } else { + copy(pickles, ft.Pickles) + } + + for i, p := range pickles { + pickle := *p + + queue <- i // reserve space in queue + + if i == 0 { + r.fmt.Feature(ft.GherkinDocument, ft.Uri, ft.Content) + } + + runPickle := func(fail *bool, pickle *messages.Pickle) { + defer func() { + <-queue // free a space in queue + }() + + if r.stopOnFailure && *fail { + return + } + + // Copy base suite. 
+ suite := *testSuiteContext.suite + if rate > 1 { + // if running concurrently, only print at end of scenario to keep + // scenario logs segregated + ffmt := ifmt.WrapOnFlush(testSuiteContext.suite.fmt) + suite.fmt = ffmt + defer ffmt.Flush() + } + + if r.scenarioInitializer != nil { + sc := ScenarioContext{suite: &suite} + r.scenarioInitializer(&sc) + } + + err := suite.runPickle(pickle) + if suite.shouldFail(err) { + copyLock.Lock() + *fail = true + copyLock.Unlock() + } + } + + if rate == 1 { + // Running within the same goroutine for concurrency 1 + // to preserve original stacks and simplify debugging. + runPickle(&failed, &pickle) + } else { + go runPickle(&failed, &pickle) + } + } + } + + // wait until last are processed + for i := 0; i < rate; i++ { + queue <- i + } + + close(queue) + + // run after suite handlers + for _, f := range testSuiteContext.afterSuiteHandlers { + f() + } + + // print summary + r.fmt.Summary() + return +} + +func runWithOptions(suiteName string, runner runner, opt Options) int { + var output io.Writer = os.Stdout + if nil != opt.Output { + output = opt.Output + } + + multiFmt := ifmt.MultiFormatter{} + + for _, formatter := range strings.Split(opt.Format, ",") { + out := output + formatterParts := strings.SplitN(formatter, ":", 2) + + if len(formatterParts) > 1 { + f, err := os.Create(formatterParts[1]) + if err != nil { + err = fmt.Errorf( + `couldn't create file with name: "%s", error: %s`, + formatterParts[1], err.Error(), + ) + fmt.Fprintln(os.Stderr, err) + + return exitOptionError + } + + defer f.Close() + + out = f + } + + if opt.NoColors { + out = colors.Uncolored(out) + } else { + out = colors.Colored(out) + } + + if nil == formatters.FindFmt(formatterParts[0]) { + var names []string + for name := range formatters.AvailableFormatters() { + names = append(names, name) + } + fmt.Fprintln(os.Stderr, fmt.Errorf( + `unregistered formatter name: "%s", use one of: %s`, + opt.Format, + strings.Join(names, ", "), + )) + return exitOptionError + } + + multiFmt.Add(formatterParts[0], out) + } + + if opt.ShowStepDefinitions { + s := suite{} + sc := ScenarioContext{suite: &s} + runner.scenarioInitializer(&sc) + printStepDefinitions(s.steps, output) + return exitOptionError + } + + if len(opt.Paths) == 0 && len(opt.FeatureContents) == 0 { + inf, err := func() (fs.FileInfo, error) { + file, err := opt.FS.Open("features") + if err != nil { + return nil, err + } + defer file.Close() + + return file.Stat() + }() + if err == nil && inf.IsDir() { + opt.Paths = []string{"features"} + } + } + + if opt.Concurrency < 1 { + opt.Concurrency = 1 + } + + runner.fmt = multiFmt.FormatterFunc(suiteName, output) + opt.FS = storage.FS{FS: opt.FS} + + if len(opt.FeatureContents) > 0 { + features, err := parser.ParseFromBytes(opt.Tags, opt.Dialect, opt.FeatureContents) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return exitOptionError + } + runner.features = append(runner.features, features...) + } + + if len(opt.Paths) > 0 { + features, err := parser.ParseFeatures(opt.FS, opt.Tags, opt.Dialect, opt.Paths) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return exitOptionError + } + runner.features = append(runner.features, features...) 
+ } + + runner.storage = storage.NewStorage() + for _, feat := range runner.features { + runner.storage.MustInsertFeature(feat) + + for _, pickle := range feat.Pickles { + runner.storage.MustInsertPickle(pickle) + } + } + + // user may have specified -1 option to create random seed + runner.randomSeed = opt.Randomize + if runner.randomSeed == -1 { + runner.randomSeed = makeRandomSeed() + } + + runner.stopOnFailure = opt.StopOnFailure + runner.strict = opt.Strict + runner.defaultContext = opt.DefaultContext + runner.testingT = opt.TestingT + + // store chosen seed in environment, so it could be seen in formatter summary report + os.Setenv("GODOG_SEED", strconv.FormatInt(runner.randomSeed, 10)) + // determine tested package + _, filename, _, _ := runtime.Caller(1) + os.Setenv("GODOG_TESTED_PACKAGE", runsFromPackage(filename)) + + failed := runner.concurrent(opt.Concurrency) + + // @TODO: should prevent from having these + os.Setenv("GODOG_SEED", "") + os.Setenv("GODOG_TESTED_PACKAGE", "") + if failed && opt.Format != "events" { + return exitFailure + } + return exitSuccess +} + +func runsFromPackage(fp string) string { + dir := filepath.Dir(fp) + + gopaths := filepath.SplitList(build.Default.GOPATH) + for _, gp := range gopaths { + gp = filepath.Join(gp, "src") + if strings.Index(dir, gp) == 0 { + return strings.TrimLeft(strings.Replace(dir, gp, "", 1), string(filepath.Separator)) + } + } + return dir +} + +// TestSuite allows for configuration +// of the test suite execution. +type TestSuite struct { + Name string + TestSuiteInitializer func(*TestSuiteContext) + ScenarioInitializer func(*ScenarioContext) + Options *Options +} + +// Run will execute the test suite. +// +// If options are not set, it will read +// all configuration options from flags.
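+// +// A minimal sketch of a typical invocation (the "pretty" format name and +// the feature path are illustrative): +// +// status := TestSuite{ +// Name: "godogs", +// ScenarioInitializer: func(sc *ScenarioContext) { /* register steps */ }, +// Options: &Options{Format: "pretty", Paths: []string{"features"}}, +// }.Run()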
+// +// The exit code may be one of: +// +// 0 - success +// 1 - failed +// 2 - command line usage error +// 128 - or higher, os signal related error exit codes +// +// If there are flag-related errors, they will be directed to os.Stderr. +func (ts TestSuite) Run() int { + if ts.Options == nil { + var err error + ts.Options, err = getDefaultOptions() + if err != nil { + return exitOptionError + } + } + if ts.Options.FS == nil { + ts.Options.FS = storage.FS{} + } + if ts.Options.ShowHelp { + flag.CommandLine.Usage() + + return 0 + } + + r := runner{testSuiteInitializer: ts.TestSuiteInitializer, scenarioInitializer: ts.ScenarioInitializer} + return runWithOptions(ts.Name, r, *ts.Options) +} + +// RetrieveFeatures will parse and return the features based on the test suite options. +// Any modification of the parsed features will not have any impact on the next Run of the test suite. +func (ts TestSuite) RetrieveFeatures() ([]*models.Feature, error) { + opt := ts.Options + + if opt == nil { + var err error + opt, err = getDefaultOptions() + if err != nil { + return nil, err + } + } + + if ts.Options.FS == nil { + ts.Options.FS = storage.FS{} + } + + if len(opt.Paths) == 0 { + inf, err := func() (fs.FileInfo, error) { + file, err := opt.FS.Open("features") + if err != nil { + return nil, err + } + defer file.Close() + + return file.Stat() + }() + if err == nil && inf.IsDir() { + opt.Paths = []string{"features"} + } + } + + return parser.ParseFeatures(opt.FS, opt.Tags, opt.Dialect, opt.Paths) +} + +func getDefaultOptions() (*Options, error) { + opt := &Options{} + opt.Output = colors.Colored(os.Stdout) + + flagSet := flagSet(opt) + if err := flagSet.Parse(os.Args[1:]); err != nil { + fmt.Fprintln(os.Stderr, err) + return nil, err + } + + opt.Paths = flagSet.Args() + opt.FS = storage.FS{} + + return opt, nil +} diff --git a/vendor/github.com/cucumber/godog/stacktrace.go b/vendor/github.com/cucumber/godog/stacktrace.go new file mode 100644 index 000000000..686c6b09b --- /dev/null +++ b/vendor/github.com/cucumber/godog/stacktrace.go @@ -0,0 +1,141 @@ +package godog + +import ( + "fmt" + "go/build" + "io" + "path" + "path/filepath" + "runtime" + "strings" +) + +// stackFrame represents a program counter inside a stack frame. +type stackFrame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f stackFrame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this frame's pc. +func (f stackFrame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +func trimGoPath(file string) string { + for _, p := range filepath.SplitList(build.Default.GOPATH) { + file = strings.Replace(file, filepath.Join(p, "src")+string(filepath.Separator), "", 1) + } + return file +} + +// line returns the line number of source code of the +// function for this frame's pc. +func (f stackFrame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface.
+// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f stackFrame) Format(s fmt.State, verb rune) { + funcname := func(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] + } + + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), trimGoPath(file)) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := stackFrame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func callStack() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// traceError is an error that has a message and a stack, but no caller. +type traceError struct { + msg string + *stack +} + +func (f *traceError) Error() string { return f.msg } + +func (f *traceError) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} diff --git a/vendor/github.com/cucumber/godog/suite.go b/vendor/github.com/cucumber/godog/suite.go new file mode 100644 index 000000000..6ca1bf53d --- /dev/null +++ b/vendor/github.com/cucumber/godog/suite.go @@ -0,0 +1,651 @@ +package godog + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + "testing" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/models" + "github.com/cucumber/godog/internal/storage" + "github.com/cucumber/godog/internal/utils" +) + +var ( + errorInterface = reflect.TypeOf((*error)(nil)).Elem() + contextInterface = reflect.TypeOf((*context.Context)(nil)).Elem() +) + +// ErrAmbiguous is returned when more than one step definition matches the step text. +var ErrAmbiguous = fmt.Errorf("ambiguous step definition") + +// ErrUndefined is returned in case a step definition was not found. +var ErrUndefined = fmt.Errorf("step is undefined") + +// ErrPending should be returned by a step definition if +// the step implementation is pending. +var ErrPending = fmt.Errorf("step implementation is pending") + +// ErrSkip should be returned by step definition or a hook if scenario and further steps are to be skipped. +var ErrSkip = fmt.Errorf("skipped") + +// StepResultStatus describes step result. +type StepResultStatus = models.StepResultStatus + +const ( + // StepPassed indicates step that passed. + StepPassed StepResultStatus = models.Passed + // StepFailed indicates step that failed. + StepFailed = models.Failed + // StepSkipped indicates step that was skipped. + StepSkipped = models.Skipped + // StepUndefined indicates undefined step.
+ StepUndefined = models.Undefined + // StepPending indicates step with pending implementation. + StepPending = models.Pending + // StepAmbiguous indicates step text matches more than one step def + StepAmbiguous = models.Ambiguous +) + +type suite struct { + steps []*models.StepDefinition + + fmt Formatter + storage *storage.Storage + + failed bool + randomSeed int64 + stopOnFailure bool + strict bool + + defaultContext context.Context + testingT *testing.T + + // suite event handlers + beforeScenarioHandlers []BeforeScenarioHook + beforeStepHandlers []BeforeStepHook + afterStepHandlers []AfterStepHook + afterScenarioHandlers []AfterScenarioHook +} + +type Attachment struct { + Body []byte + FileName string + MediaType string +} + +type attachmentKey struct{} + +func Attach(ctx context.Context, attachments ...Attachment) context.Context { + existing := Attachments(ctx) + updated := append(existing, attachments...) + return context.WithValue(ctx, attachmentKey{}, updated) +} + +func Attachments(ctx context.Context) []Attachment { + v := ctx.Value(attachmentKey{}) + + if v == nil { + return []Attachment{} + } + return v.([]Attachment) +} + +func clearAttach(ctx context.Context) context.Context { + return context.WithValue(ctx, attachmentKey{}, nil) +} + +func pickleAttachments(ctx context.Context) []models.PickleAttachment { + + pickledAttachments := []models.PickleAttachment{} + attachments := Attachments(ctx) + + for _, a := range attachments { + pickledAttachments = append(pickledAttachments, models.PickleAttachment{ + Name: a.FileName, + Data: a.Body, + MimeType: a.MediaType, + }) + } + + return pickledAttachments +} + +func (s *suite) matchStep(step *messages.PickleStep) (*models.StepDefinition, error) { + def, err := s.matchStepTextAndType(step.Text, step.Type) + if err != nil { + return nil, err + } + + if def != nil && step.Argument != nil { + def.Args = append(def.Args, step.Argument) + } + return def, nil +} + +func (s *suite) runStep(ctx context.Context, pickle *Scenario, step *Step, scenarioErr error, isFirst, isLast bool) (rctx context.Context, err error) { + var match *models.StepDefinition + + rctx = ctx + + // user multistep definitions may panic + defer func() { + if e := recover(); e != nil { + pe, isErr := e.(error) + switch { + case isErr && errors.Is(pe, errStopNow): + // FailNow or SkipNow called on dogTestingT, so clear the error to let the normal + // below getTestingT(ctx).isFailed() call handle the reasons. + err = nil + case err != nil: + err = &traceError{ + msg: fmt.Sprintf("%s: %v", err.Error(), e), + stack: callStack(), + } + default: + err = &traceError{ + msg: fmt.Sprintf("%v", e), + stack: callStack(), + } + } + } + + earlyReturn := scenarioErr != nil || errors.Is(err, ErrUndefined) + + // Check for any calls to Fail on dogT + if err == nil { + if t := getTestingT(ctx); t != nil { + err = t.isFailed() + } + } + + status := StepUndefined + + switch { + case errors.Is(err, ErrAmbiguous): + status = StepAmbiguous + case errors.Is(err, ErrPending): + status = StepPending + case errors.Is(err, ErrSkip), err == nil && scenarioErr != nil: + status = StepSkipped + case errors.Is(err, ErrUndefined): + status = StepUndefined + case err != nil: + status = StepFailed + case err == nil && scenarioErr == nil: + status = StepPassed + } + + // Run after step handlers. + rctx, err = s.runAfterStepHooks(ctx, step, status, err) + + // Trigger after scenario on failing or last step to attach possible hook error to step. 
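+ // shouldFail (defined near the end of this file) decides whether an error + // is fatal: ErrSkip never fails a scenario, and pending/undefined steps + // fail only in strict mode, so the after-scenario hooks fire on the last + // step or on the first genuinely failing one.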
+ if !s.shouldFail(scenarioErr) && (isLast || s.shouldFail(err)) { + rctx, err = s.runAfterScenarioHooks(rctx, pickle, err) + } + + // extract any accumulated attachments and clear them + pickledAttachments := pickleAttachments(rctx) + rctx = clearAttach(rctx) + + if earlyReturn { + return + } + + switch { + case err == nil: + sr := models.NewStepResult(models.Passed, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + s.fmt.Passed(pickle, step, match.GetInternalStepDefinition()) + case errors.Is(err, ErrPending): + sr := models.NewStepResult(models.Pending, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + s.fmt.Pending(pickle, step, match.GetInternalStepDefinition()) + case errors.Is(err, ErrSkip): + sr := models.NewStepResult(models.Skipped, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + s.fmt.Skipped(pickle, step, match.GetInternalStepDefinition()) + case errors.Is(err, ErrAmbiguous): + sr := models.NewStepResult(models.Ambiguous, pickle.Id, step.Id, match, pickledAttachments, err) + s.storage.MustInsertPickleStepResult(sr) + s.fmt.Ambiguous(pickle, step, match.GetInternalStepDefinition(), err) + default: + sr := models.NewStepResult(models.Failed, pickle.Id, step.Id, match, pickledAttachments, err) + s.storage.MustInsertPickleStepResult(sr) + s.fmt.Failed(pickle, step, match.GetInternalStepDefinition(), err) + } + }() + + // run before scenario handlers + if isFirst { + ctx, err = s.runBeforeScenarioHooks(ctx, pickle) + } + + // run before step handlers + ctx, err = s.runBeforeStepHooks(ctx, step, err) + + var matchError error + match, matchError = s.matchStep(step) + + s.storage.MustInsertStepDefintionMatch(step.AstNodeIds[0], match) + s.fmt.Defined(pickle, step, match.GetInternalStepDefinition()) + + if err != nil { + pickledAttachments := pickleAttachments(ctx) + ctx = clearAttach(ctx) + + sr := models.NewStepResult(models.Failed, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + return ctx, err + } + + if matchError != nil { + return ctx, matchError + } + + if ctx, undef, err := s.maybeUndefined(ctx, step.Text, step.Argument, step.Type); err != nil { + return ctx, err + } else if len(undef) > 0 { + if match != nil { + match = &models.StepDefinition{ + StepDefinition: formatters.StepDefinition{ + Expr: match.Expr, + Handler: match.Handler, + Keyword: match.Keyword, + }, + Args: match.Args, + HandlerValue: match.HandlerValue, + File: match.File, + Line: match.Line, + Nested: match.Nested, + Undefined: undef, + } + } + + pickledAttachments := pickleAttachments(ctx) + ctx = clearAttach(ctx) + + sr := models.NewStepResult(models.Undefined, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + + s.fmt.Undefined(pickle, step, match.GetInternalStepDefinition()) + return ctx, fmt.Errorf("%w: %s", ErrUndefined, step.Text) + } + + if scenarioErr != nil { + pickledAttachments := pickleAttachments(ctx) + ctx = clearAttach(ctx) + + sr := models.NewStepResult(models.Skipped, pickle.Id, step.Id, match, pickledAttachments, nil) + s.storage.MustInsertPickleStepResult(sr) + + s.fmt.Skipped(pickle, step, match.GetInternalStepDefinition()) + return ctx, nil + } + + ctx, err = s.maybeSubSteps(match.Run(ctx)) + + return ctx, err +} + +func (s *suite) runBeforeStepHooks(ctx context.Context, step *Step, err error) (context.Context, error) { + hooksFailed := false + + for _, f 
:= range s.beforeStepHandlers { + hctx, herr := f(ctx, step) + if herr != nil { + hooksFailed = true + + if err == nil { + err = herr + } else { + err = fmt.Errorf("%v, %w", herr, err) + } + } + + if hctx != nil { + ctx = hctx + } + } + + if hooksFailed { + err = fmt.Errorf("before step hook failed: %w", err) + } + + return ctx, err +} + +func (s *suite) runAfterStepHooks(ctx context.Context, step *Step, status StepResultStatus, err error) (context.Context, error) { + for _, f := range s.afterStepHandlers { + hctx, herr := f(ctx, step, status, err) + + // Adding hook error to resulting error without breaking hooks loop. + if herr != nil { + if err == nil { + err = herr + } else { + err = fmt.Errorf("%v, %w", herr, err) + } + } + + if hctx != nil { + ctx = hctx + } + } + + return ctx, err +} + +func (s *suite) runBeforeScenarioHooks(ctx context.Context, pickle *messages.Pickle) (context.Context, error) { + var err error + + // run before scenario handlers + for _, f := range s.beforeScenarioHandlers { + hctx, herr := f(ctx, pickle) + if herr != nil { + if err == nil { + err = herr + } else { + err = fmt.Errorf("%v, %w", herr, err) + } + } + + if hctx != nil { + ctx = hctx + } + } + + if err != nil { + err = fmt.Errorf("before scenario hook failed: %w", err) + } + + return ctx, err +} + +func (s *suite) runAfterScenarioHooks(ctx context.Context, pickle *messages.Pickle, lastStepErr error) (context.Context, error) { + err := lastStepErr + + hooksFailed := false + isStepErr := true + + // run after scenario handlers + for _, f := range s.afterScenarioHandlers { + hctx, herr := f(ctx, pickle, err) + + // Adding hook error to resulting error without breaking hooks loop. + if herr != nil { + hooksFailed = true + + if err == nil { + isStepErr = false + err = herr + } else { + if isStepErr { + err = fmt.Errorf("step error: %w", err) + isStepErr = false + } + err = fmt.Errorf("%v, %w", herr, err) + } + } + + if hctx != nil { + ctx = hctx + } + } + + if hooksFailed { + err = fmt.Errorf("after scenario hook failed: %w", err) + } + + return ctx, err +} + +func (s *suite) maybeUndefined(ctx context.Context, text string, arg interface{}, stepType messages.PickleStepType) (context.Context, []string, error) { + var undefined []string + step, err := s.matchStepTextAndType(text, stepType) + if err != nil { + return ctx, undefined, err + } + + if nil == step { + return ctx, []string{text}, nil + } + + if !step.Nested { + return ctx, undefined, nil + } + + if arg != nil { + step.Args = append(step.Args, arg) + } + + ctx, steps := step.Run(ctx) + + for _, next := range steps.(Steps) { + lines := strings.Split(next, "\n") + // @TODO: we cannot currently parse table or content body from nested steps + if len(lines) > 1 { + return ctx, undefined, fmt.Errorf("nested steps cannot be multiline and have table or content body argument") + } + if len(lines[0]) > 0 && lines[0][len(lines[0])-1] == ':' { + return ctx, undefined, fmt.Errorf("nested steps cannot be multiline and have table or content body argument") + } + ctx, undef, err := s.maybeUndefined(ctx, next, nil, messages.PickleStepType_UNKNOWN) + if err != nil { + return ctx, undefined, err + } + undefined = append(undefined, undef...) 
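+ // Nested steps are resolved recursively: every text line returned by a + // Steps handler is matched again, and any unmatched text accumulates + // in undefined.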
+ } + return ctx, undefined, nil +} + +func (s *suite) maybeSubSteps(ctx context.Context, result interface{}) (context.Context, error) { + if nil == result { + return ctx, nil + } + + if err, ok := result.(error); ok { + return ctx, err + } + + steps, ok := result.(Steps) + if !ok { + return ctx, fmt.Errorf("unexpected error, should have been godog.Steps: %T - %+v", result, result) + } + + for _, text := range steps { + def, err := s.matchStepTextAndType(text, messages.PickleStepType_UNKNOWN) + if err != nil { + return ctx, err + } + + if def == nil { + return ctx, fmt.Errorf("%w: %s", ErrUndefined, text) + } else { + ctx, err = s.runSubStep(ctx, text, def) + if err != nil { + return ctx, err + } + } + } + return ctx, nil +} + +func (s *suite) runSubStep(ctx context.Context, text string, def *models.StepDefinition) (_ context.Context, err error) { + st := &Step{} + st.Text = text + st.Type = messages.PickleStepType_ACTION + + defer func() { + status := StepPassed + + switch { + case errors.Is(err, ErrUndefined): + status = StepUndefined + case errors.Is(err, ErrPending): + status = StepPending + case err != nil: + status = StepFailed + } + + ctx, err = s.runAfterStepHooks(ctx, st, status, err) + }() + + ctx, err = s.runBeforeStepHooks(ctx, st, nil) + if err != nil { + return ctx, fmt.Errorf("%s: %+v", text, err) + } + + if ctx, err = s.maybeSubSteps(def.Run(ctx)); err != nil { + return ctx, fmt.Errorf("%s: %+v", text, err) + } + + return ctx, nil +} + +func (s *suite) matchStepTextAndType(text string, stepType messages.PickleStepType) (*models.StepDefinition, error) { + var first *models.StepDefinition + matchingExpressions := make([]string, 0) + + for _, h := range s.steps { + if m := h.Expr.FindStringSubmatch(text); len(m) > 0 { + if !keywordMatches(h.Keyword, stepType) { + continue + } + var args []interface{} + for _, m := range m[1:] { + args = append(args, m) + } + + matchingExpressions = append(matchingExpressions, h.Expr.String()) + + // since we need to assign arguments + // better to copy the step definition + match := &models.StepDefinition{ + StepDefinition: formatters.StepDefinition{ + Expr: h.Expr, + Handler: h.Handler, + Keyword: h.Keyword, + }, + Args: args, + HandlerValue: h.HandlerValue, + File: h.File, + Line: h.Line, + Nested: h.Nested, + } + + if first == nil { + first = match + } + } + } + + if s.strict { + if len(matchingExpressions) > 1 { + errs := "\n " + strings.Join(matchingExpressions, "\n ") + return nil, fmt.Errorf("%w, step text: %s\n matches:%s", ErrAmbiguous, text, errs) + } + } + + return first, nil +} + +func keywordMatches(k formatters.Keyword, stepType messages.PickleStepType) bool { + if k == formatters.None { + return true + } + switch stepType { + case messages.PickleStepType_CONTEXT: + return k == formatters.Given + case messages.PickleStepType_ACTION: + return k == formatters.When + case messages.PickleStepType_OUTCOME: + return k == formatters.Then + default: + return true + } +} + +func (s *suite) runSteps(ctx context.Context, pickle *Scenario, steps []*Step) (context.Context, error) { + var ( + stepErr, scenarioErr error + ) + + for i, step := range steps { + isLast := i == len(steps)-1 + isFirst := i == 0 + ctx, stepErr = s.runStep(ctx, pickle, step, scenarioErr, isFirst, isLast) + if scenarioErr == nil || s.shouldFail(stepErr) { + scenarioErr = stepErr + } + } + + return ctx, scenarioErr +} + +func (s *suite) shouldFail(err error) bool { + if err == nil || errors.Is(err, ErrSkip) { + return false + } + + if errors.Is(err, ErrUndefined) || 
errors.Is(err, ErrPending) { + return s.strict + } + + return true +} + +func (s *suite) runPickle(pickle *messages.Pickle) (err error) { + ctx := s.defaultContext + if ctx == nil { + ctx = context.Background() + } + + ctx, cancel := context.WithCancel(ctx) + + defer cancel() + + if len(pickle.Steps) == 0 { + pr := models.PickleResult{PickleID: pickle.Id, StartedAt: utils.TimeNowFunc()} + s.storage.MustInsertPickleResult(pr) + + s.fmt.Pickle(pickle) + return fmt.Errorf("%w: no steps in scenario", ErrUndefined) + } + + // Before scenario hooks are called in context of first evaluated step + // so that error from handler can be added to step. + + pr := models.PickleResult{PickleID: pickle.Id, StartedAt: utils.TimeNowFunc()} + s.storage.MustInsertPickleResult(pr) + + s.fmt.Pickle(pickle) + + dt := &testingT{ + name: pickle.Name, + } + ctx = setContextTestingT(ctx, dt) + // scenario + if s.testingT != nil { + // Running scenario as a subtest. + s.testingT.Run(pickle.Name, func(t *testing.T) { + dt.t = t + ctx, err = s.runSteps(ctx, pickle, pickle.Steps) + if s.shouldFail(err) { + t.Errorf("%+v", err) + } + }) + } else { + ctx, err = s.runSteps(ctx, pickle, pickle.Steps) + } + + // After scenario handlers are called in context of last evaluated step + // so that error from handler can be added to step. + + return err +} diff --git a/vendor/github.com/cucumber/godog/test_context.go b/vendor/github.com/cucumber/godog/test_context.go new file mode 100644 index 000000000..add9f47b0 --- /dev/null +++ b/vendor/github.com/cucumber/godog/test_context.go @@ -0,0 +1,371 @@ +package godog + +import ( + "context" + "fmt" + "reflect" + "regexp" + "runtime" + + messages "github.com/cucumber/messages/go/v21" + + "github.com/cucumber/godog/formatters" + "github.com/cucumber/godog/internal/builder" + "github.com/cucumber/godog/internal/flags" + "github.com/cucumber/godog/internal/models" +) + +// GherkinDocument represents a gherkin document. +type GherkinDocument = messages.GherkinDocument + +// Scenario represents the executed scenario +type Scenario = messages.Pickle + +// Step represents the executed step +type Step = messages.PickleStep + +// Steps allows nesting steps: +// instead of returning an error from a step func, +// it is possible to return combined steps: +// +// func multistep(name string) godog.Steps { +// return godog.Steps{ +// fmt.Sprintf(`an user named "%s"`, name), +// fmt.Sprintf(`user "%s" is authenticated`, name), +// } +// } +// +// These steps will be matched and executed in +// sequential order. The first one which fails +// will result in the main step's failure. +type Steps []string + +// StepDefinition is a registered step definition. +// It contains a StepHandler and a regexp which +// is used to match a step, as well as the Args which +// were matched by the last executed step. +// +// This structure is passed to the formatter +// when a step is matched and has either failed +// or succeeded. +type StepDefinition = formatters.StepDefinition + +// DocString represents the DocString argument made to a step definition +type DocString = messages.PickleDocString + +// Table represents the Table argument made to a step definition +type Table = messages.PickleTable + +// TestSuiteContext allows various contexts +// to register event handlers. +// +// When running a test suite, the instance of TestSuiteContext +// is passed to all functions (contexts), which +// take it as their first and only argument.
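+// +// A minimal initializer sketch (the hook bodies are illustrative): +// +// func InitializeTestSuite(ctx *TestSuiteContext) { +// ctx.BeforeSuite(func() { /* e.g. connect to a database */ }) +// ctx.AfterSuite(func() { /* e.g. close the connection */ }) +// }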
+// +// Note that event hooks do not catch panics, +// in order to preserve trace information. +type TestSuiteContext struct { + beforeSuiteHandlers []func() + afterSuiteHandlers []func() + + suite *suite +} + +// BeforeSuite registers a function or method +// to be run once before the suite runner. +// +// Use it to prepare the test suite for a spin. +// Connect to and prepare the database, for instance... +func (ctx *TestSuiteContext) BeforeSuite(fn func()) { + ctx.beforeSuiteHandlers = append(ctx.beforeSuiteHandlers, fn) +} + +// AfterSuite registers a function or method +// to be run once after the suite runner. +func (ctx *TestSuiteContext) AfterSuite(fn func()) { + ctx.afterSuiteHandlers = append(ctx.afterSuiteHandlers, fn) +} + +// ScenarioContext allows registering scenario hooks. +func (ctx *TestSuiteContext) ScenarioContext() *ScenarioContext { + return &ScenarioContext{ + suite: ctx.suite, + } +} + +// ScenarioContext allows various contexts +// to register steps and event handlers. +// +// When running a scenario, the instance of ScenarioContext +// is passed to all functions (contexts), which +// take it as their first and only argument. +// +// Note that event hooks do not catch panics, +// in order to preserve trace information. Only step +// executions catch panics, since a panic may +// be a context specific error. +type ScenarioContext struct { + suite *suite +} + +// StepContext allows registering step hooks. +type StepContext struct { + suite *suite +} + +// Before registers a function or method +// to be run before every scenario. +// +// It is a good practice to restore the default state +// before every scenario, so it would be isolated from +// any kind of state. +func (ctx ScenarioContext) Before(h BeforeScenarioHook) { + ctx.suite.beforeScenarioHandlers = append(ctx.suite.beforeScenarioHandlers, h) +} + +// BeforeScenarioHook defines a hook before scenario. +type BeforeScenarioHook func(ctx context.Context, sc *Scenario) (context.Context, error) + +// After registers a function or method +// to be run after every scenario. +func (ctx ScenarioContext) After(h AfterScenarioHook) { + ctx.suite.afterScenarioHandlers = append(ctx.suite.afterScenarioHandlers, h) +} + +// AfterScenarioHook defines a hook after scenario. +type AfterScenarioHook func(ctx context.Context, sc *Scenario, err error) (context.Context, error) + +// StepContext exposes StepContext of a scenario. +func (ctx ScenarioContext) StepContext() StepContext { + return StepContext(ctx) +} + +// Before registers a function or method +// to be run before every step. +func (ctx StepContext) Before(h BeforeStepHook) { + ctx.suite.beforeStepHandlers = append(ctx.suite.beforeStepHandlers, h) +} + +// BeforeStepHook defines a hook before step. +type BeforeStepHook func(ctx context.Context, st *Step) (context.Context, error) + +// After registers a function or method +// to be run after every step. +// +// It may be convenient to return a different kind of error +// in order to print more state details, which may help +// in case of step failure. +// +// In some cases, for example when running a headless +// browser, it may be useful to take a screenshot after failure. +func (ctx StepContext) After(h AfterStepHook) { + ctx.suite.afterStepHandlers = append(ctx.suite.afterStepHandlers, h) +} + +// AfterStepHook defines a hook after step.
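+// A registration sketch (the hook body is illustrative): +// +// ctx.StepContext().After(func(ctx context.Context, st *Step, status StepResultStatus, err error) (context.Context, error) { +// if status == StepFailed { +// // e.g. capture a screenshot or dump state here +// } +// return ctx, err +// })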
+type AfterStepHook func(ctx context.Context, st *Step, status StepResultStatus, err error) (context.Context, error) + +// BeforeScenario registers a function or method +// to be run before every scenario. +// +// It is a good practice to restore the default state +// before every scenario, so it would be isolated from +// any kind of state. +// +// Deprecated: use Before. +func (ctx ScenarioContext) BeforeScenario(fn func(sc *Scenario)) { + ctx.Before(func(ctx context.Context, sc *Scenario) (context.Context, error) { + fn(sc) + + return ctx, nil + }) +} + +// AfterScenario registers a function or method +// to be run after every scenario. +// +// Deprecated: use After. +func (ctx ScenarioContext) AfterScenario(fn func(sc *Scenario, err error)) { + ctx.After(func(ctx context.Context, sc *Scenario, err error) (context.Context, error) { + fn(sc, err) + + return ctx, nil + }) +} + +// BeforeStep registers a function or method +// to be run before every step. +// +// Deprecated: use ScenarioContext.StepContext() and StepContext.Before. +func (ctx ScenarioContext) BeforeStep(fn func(st *Step)) { + ctx.StepContext().Before(func(ctx context.Context, st *Step) (context.Context, error) { + fn(st) + + return ctx, nil + }) +} + +// AfterStep registers a function or method +// to be run after every step. +// +// It may be convenient to return a different kind of error +// in order to print more state details, which may help +// in case of step failure. +// +// In some cases, for example when running a headless +// browser, it may be useful to take a screenshot after failure. +// +// Deprecated: use ScenarioContext.StepContext() and StepContext.After. +func (ctx ScenarioContext) AfterStep(fn func(st *Step, err error)) { + ctx.StepContext().After(func(ctx context.Context, st *Step, status StepResultStatus, err error) (context.Context, error) { + fn(st, err) + + return ctx, nil + }) +} + +// Step allows registering a *StepDefinition in the +// Godog feature suite; the definition will be applied +// to all steps matching the given Regexp expr. +// +// It will panic if expr is not a valid regular +// expression or stepFunc is not a valid step +// handler. +// +// The expression can be of type: *regexp.Regexp, string or []byte +// +// The stepFunc may accept one or several arguments of type: +// - int, int8, int16, int32, int64 +// - float32, float64 +// - string +// - []byte +// - *godog.DocString +// - *godog.Table +// +// The stepFunc needs to return either an error or []string for multistep +// +// Note that if there are two definitions which may match +// the same step, then only the first matched handler +// will be applied. +// +// If no *StepDefinition is matched, then +// ErrUndefined error will be returned when +// running steps. +func (ctx ScenarioContext) Step(expr, stepFunc interface{}) { + ctx.stepWithKeyword(expr, stepFunc, formatters.None) +} + +// Given functions identically to Step, but the *StepDefinition +// will only be matched if the step starts with "Given". "And" +// and "But" keywords copy the keyword of the last step for the +// purpose of matching. +func (ctx ScenarioContext) Given(expr, stepFunc interface{}) { + ctx.stepWithKeyword(expr, stepFunc, formatters.Given) +} + +// When functions identically to Step, but the *StepDefinition +// will only be matched if the step starts with "When". "And" +// and "But" keywords copy the keyword of the last step for the +// purpose of matching.
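+// +// A registration sketch (the expression and handler are illustrative): +// +// ctx.When(`^I eat (\d+) godogs$`, func(num int) error { +// // act on the system under test here +// return nil +// })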
+func (ctx ScenarioContext) When(expr, stepFunc interface{}) { + ctx.stepWithKeyword(expr, stepFunc, formatters.When) +} + +// Then functions identically to Step, but the *StepDefinition +// will only be matched if the step starts with "Then". "And" +// and "But" keywords copy the keyword of the last step for the +// purpose of matching. +func (ctx ScenarioContext) Then(expr, stepFunc interface{}) { + ctx.stepWithKeyword(expr, stepFunc, formatters.Then) +} + +func (ctx ScenarioContext) stepWithKeyword(expr interface{}, stepFunc interface{}, keyword formatters.Keyword) { + var regex *regexp.Regexp + + // Validate the first input param is regex compatible + switch t := expr.(type) { + case *regexp.Regexp: + regex = t + case string: + regex = regexp.MustCompile(t) + case []byte: + regex = regexp.MustCompile(string(t)) + default: + panic(fmt.Sprintf("expecting expr to be a *regexp.Regexp or a string or []byte, got type: %T", expr)) + } + + // Validate that the handler is a function. + handlerType := reflect.TypeOf(stepFunc) + if handlerType.Kind() != reflect.Func { + panic(fmt.Sprintf("expected handler to be func, but got: %T", stepFunc)) + } + + // FIXME = Validate the handler function param types here so + // that any errors are discovered early. + // StepDefinition.Run defines the supported types but fails at run time not registration time + + // Validate the function's return types. + helpPrefix := "expected handler to return one of error or context.Context or godog.Steps or (context.Context, error)" + isNested := false + + numOut := handlerType.NumOut() + switch numOut { + case 0: + // No return values. + case 1: + // One return value: should be error, Steps, or context.Context. + outType := handlerType.Out(0) + if outType == reflect.TypeOf(Steps{}) { + isNested = true + } else { + if outType != errorInterface && outType != contextInterface { + panic(fmt.Sprintf("%s, but got: %v", helpPrefix, outType)) + } + } + case 2: + // Two return values: should be (context.Context, error). + if handlerType.Out(0) != contextInterface || handlerType.Out(1) != errorInterface { + panic(fmt.Sprintf("%s, but got: %v, %v", helpPrefix, handlerType.Out(0), handlerType.Out(1))) + } + default: + // More than two return values. + panic(fmt.Sprintf("expected handler to return either zero, one or two values, but it has: %d", numOut)) + } + + // Register the handler + def := &models.StepDefinition{ + StepDefinition: formatters.StepDefinition{ + Handler: stepFunc, + Expr: regex, + Keyword: keyword, + }, + HandlerValue: reflect.ValueOf(stepFunc), + Nested: isNested, + } + + // Get the file and line number of the call that created this step with a + // call to one of the Step, Given, When, or Then wrappers. + _, def.File, def.Line, _ = runtime.Caller(2) + + // stash the step + ctx.suite.steps = append(ctx.suite.steps, def) +} + +// Build creates a test package like go test command at given target path. +// If there are no go files in tested directory, then +// it simply builds a godog executable to scan features. +// +// If there are go test files, it first builds a test +// package with standard go test command. +// +// Finally, it generates godog suite executable which +// registers exported godog contexts from the test files +// of tested package. 
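+// +// A usage sketch (the binary path is illustrative): +// +// if err := Build("./godog.test"); err != nil { +// // handle the build failure +// }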
+// +// Returns the path to the generated executable. +func Build(bin string) error { + return builder.Build(bin) +} + +type Feature = flags.Feature diff --git a/vendor/github.com/cucumber/godog/testingt.go b/vendor/github.com/cucumber/godog/testingt.go new file mode 100644 index 000000000..25981b89a --- /dev/null +++ b/vendor/github.com/cucumber/godog/testingt.go @@ -0,0 +1,206 @@ +package godog + +import ( + "context" + "fmt" + "strings" + "testing" +) + +// T returns a TestingT compatible interface from the current test context. It will return nil if +// called outside the context of a test. This can be used with (for example) testify's assert and +// require packages. +func T(ctx context.Context) TestingT { + return getTestingT(ctx) +} + +// TestingT is a subset of the public methods implemented by go's testing.T. It allows assertion +// libraries to be used with godog, provided they depend only on this subset of methods. +type TestingT interface { + // Name returns the name of the current pickle under test + Name() string + // Log will log to the current testing.T log if set, otherwise it will log to stdout + Log(args ...interface{}) + // Logf will log a formatted string to the current testing.T log if set, otherwise it will log + // to stdout + Logf(format string, args ...interface{}) + // Error fails the current test and logs the provided arguments. Equivalent to calling Log then + // Fail. + Error(args ...interface{}) + // Errorf fails the current test and logs the formatted message. Equivalent to calling Logf then + // Fail. + Errorf(format string, args ...interface{}) + // Fail marks the current test as failed, but does not halt execution of the step. + Fail() + // FailNow marks the current test as failed and halts execution of the step. + FailNow() + // Fatal logs the provided arguments, marks the test as failed and halts execution of the step. + Fatal(args ...interface{}) + // Fatalf logs the formatted message, marks the test as failed and halts execution of the step. + Fatalf(format string, args ...interface{}) + // Skip logs the provided arguments and marks the test as skipped but does not halt execution + // of the step. + Skip(args ...interface{}) + // Skipf logs the formatted message and marks the test as skipped but does not halt execution + // of the step. + Skipf(format string, args ...interface{}) + // SkipNow marks the current test as skipped and halts execution of the step. + SkipNow() + // Skipped returns true if the test has been marked as skipped. + Skipped() bool +} + +// Logf will log test output. If called in the context of a test and testing.T has been registered, +// this will log using the step's testing.T, else it will simply log to stdout. +func Logf(ctx context.Context, format string, args ...interface{}) { + if t := getTestingT(ctx); t != nil { + t.Logf(format, args...) + return + } + fmt.Printf(format+"\n", args...) +} + +// Log will log test output. If called in the context of a test and testing.T has been registered, +// this will log using the step's testing.T, else it will simply log to stdout. +func Log(ctx context.Context, args ...interface{}) { + if t := getTestingT(ctx); t != nil { + t.Log(args...) + return + } + fmt.Println(args...)
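+ // A usage sketch from inside a step definition (the message is + // illustrative); T(ctx) is nil outside of a scenario: + // + // godog.Log(ctx, "seeded database") + // if t := godog.T(ctx); t != nil { + // t.Logf("running %s", t.Name()) + // }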
+} + +// LoggedMessages returns an array of any logged messages that have been recorded during the test +// through calls to godog.Log / godog.Logf or via operations against godog.T(ctx) +func LoggedMessages(ctx context.Context) []string { + if t := getTestingT(ctx); t != nil { + return t.logMessages + } + return nil +} + +// errStopNow should be returned inside a panic within the test to immediately halt execution of that +// test +var errStopNow = fmt.Errorf("FailNow or SkipNow called") + +type testingT struct { + name string + t *testing.T + failed bool + skipped bool + failMessages []string + logMessages []string +} + +// check interface against our testingT and the upstream testing.B/F/T: +var ( + _ TestingT = &testingT{} + _ TestingT = (*testing.T)(nil) +) + +func (dt *testingT) Name() string { + if dt.t != nil { + return dt.t.Name() + } + return dt.name +} + +func (dt *testingT) Log(args ...interface{}) { + dt.logMessages = append(dt.logMessages, fmt.Sprint(args...)) + if dt.t != nil { + dt.t.Log(args...) + return + } + fmt.Println(args...) +} + +func (dt *testingT) Logf(format string, args ...interface{}) { + dt.logMessages = append(dt.logMessages, fmt.Sprintf(format, args...)) + if dt.t != nil { + dt.t.Logf(format, args...) + return + } + fmt.Printf(format+"\n", args...) +} + +func (dt *testingT) Error(args ...interface{}) { + dt.Log(args...) + dt.failMessages = append(dt.failMessages, fmt.Sprintln(args...)) + dt.Fail() +} + +func (dt *testingT) Errorf(format string, args ...interface{}) { + dt.Logf(format, args...) + dt.failMessages = append(dt.failMessages, fmt.Sprintf(format, args...)) + dt.Fail() +} + +func (dt *testingT) Fail() { + dt.failed = true +} + +func (dt *testingT) FailNow() { + dt.Fail() + panic(errStopNow) +} + +func (dt *testingT) Fatal(args ...interface{}) { + dt.Log(args...) + dt.FailNow() +} + +func (dt *testingT) Fatalf(format string, args ...interface{}) { + dt.Logf(format, args...) + dt.FailNow() +} + +func (dt *testingT) Skip(args ...interface{}) { + dt.Log(args...) + dt.skipped = true +} + +func (dt *testingT) Skipf(format string, args ...interface{}) { + dt.Logf(format, args...) 
+ dt.skipped = true +} + +func (dt *testingT) SkipNow() { + dt.skipped = true + panic(errStopNow) +} + +func (dt *testingT) Skipped() bool { + return dt.skipped +} + +// isFailed will return an error representing the calls to Fail made during this test +func (dt *testingT) isFailed() error { + if dt.skipped { + return ErrSkip + } + if !dt.failed { + return nil + } + switch len(dt.failMessages) { + case 0: + return fmt.Errorf("fail called on TestingT") + case 1: + return fmt.Errorf(dt.failMessages[0]) + default: + return fmt.Errorf("checks failed:\n* %s", strings.Join(dt.failMessages, "\n* ")) + } +} + +type testingTCtxVal struct{} + +func setContextTestingT(ctx context.Context, dt *testingT) context.Context { + return context.WithValue(ctx, testingTCtxVal{}, dt) +} + +func getTestingT(ctx context.Context) *testingT { + dt, ok := ctx.Value(testingTCtxVal{}).(*testingT) + if !ok { + return nil + } + return dt +} diff --git a/vendor/github.com/cucumber/messages/go/v21/.gitignore b/vendor/github.com/cucumber/messages/go/v21/.gitignore new file mode 100644 index 000000000..7b0ee7aeb --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/.gitignore @@ -0,0 +1,17 @@ +.built +.compared +.deps +.dist +.dist-compressed +.go-get +.gofmt +.linted +.tested* +acceptance/ +bin/ +dist/ +dist_compressed/ +*.bin +*.iml +# upx dist/cucumber-gherkin-openbsd-386 fails with a core dump +core.*.!usr!bin!upx-ucl diff --git a/vendor/github.com/cucumber/messages/go/v21/Dockerfile b/vendor/github.com/cucumber/messages/go/v21/Dockerfile new file mode 100644 index 000000000..64e1c2795 --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/Dockerfile @@ -0,0 +1,29 @@ +#syntax=docker/dockerfile:1.4 + +# Base image +ARG GO_VERSION=1.19 +FROM golang:${GO_VERSION}-alpine AS golang +WORKDIR /cucumber + + +# Dummy stage for generated code, overridden in main build +FROM scratch AS schema-codegen + + +FROM golang AS with-dependencies + +COPY --link go.mod go.sum . +RUN --mount=type=cache,target=/go/pkg/mod/cache \ + go mod download && go mod verify + + +FROM golang AS tested + +RUN apk add gcc libc-dev + +COPY --link . . +COPY --link --from=with-dependencies /go/pkg /go/pkg +COPY --link --from=schema-codegen /*.go . + +RUN gofmt -w . +RUN go test --v diff --git a/vendor/github.com/cucumber/messages/go/v21/LICENSE b/vendor/github.com/cucumber/messages/go/v21/LICENSE new file mode 100644 index 000000000..725ba9f4a --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Cucumber Ltd + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cucumber/messages/go/v21/Makefile b/vendor/github.com/cucumber/messages/go/v21/Makefile new file mode 100644 index 000000000..898214efa --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/Makefile @@ -0,0 +1,20 @@ +schemas = $(shell find ../jsonschema -name "*.json") + +.DEFAULT_GOAL = help + +help: ## Show this help + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make <target>\n\nWhere <target> is one of:\n"} /^[$$()% a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +generate: require messages.go ## Generate go code based on the schemas found in ../jsonschema and using the scripts in ../jsonschema/scripts for the generation + +require: ## Check requirements for the code generation (ruby and go are required) + @ruby --version >/dev/null 2>&1 || (echo "ERROR: ruby is required."; exit 1) + @go version >/dev/null 2>&1 || (echo "ERROR: go is required."; exit 1) + +clean: ## Remove automatically generated files and related artifacts + rm -f messages.go + +messages.go: $(schemas) ../jsonschema/scripts/codegen.rb ../jsonschema/scripts/templates/go.go.erb ../jsonschema/scripts/templates/go.enum.go.erb + ruby ../jsonschema/scripts/codegen.rb Go ../jsonschema go.go.erb > $@ + ruby ../jsonschema/scripts/codegen.rb Go ../jsonschema go.enum.go.erb >> $@ + go fmt messages.go diff --git a/vendor/github.com/cucumber/messages/go/v21/id_generator.go b/vendor/github.com/cucumber/messages/go/v21/id_generator.go new file mode 100644 index 000000000..a721f9789 --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/id_generator.go @@ -0,0 +1,28 @@ +package messages + +import ( + "github.com/gofrs/uuid" + "strconv" +) + +type IdGenerator interface { + newId() func() string +} + +type Incrementing struct { + next int +} + +func (self *Incrementing) NewId() string { + result := strconv.Itoa(self.next) + self.next++ + return result +} + +type UUID struct { + next int +} + +func (i UUID) NewId() string { + return uuid.Must(uuid.NewV4()).String() +} diff --git a/vendor/github.com/cucumber/messages/go/v21/messages.go b/vendor/github.com/cucumber/messages/go/v21/messages.go new file mode 100644 index 000000000..fbb173d4e --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/messages.go @@ -0,0 +1,510 @@ +package messages + +type Attachment struct { + Body string `json:"body"` + ContentEncoding AttachmentContentEncoding `json:"contentEncoding"` + FileName string `json:"fileName,omitempty"` + MediaType string `json:"mediaType"` + Source *Source `json:"source,omitempty"` + TestCaseStartedId string `json:"testCaseStartedId,omitempty"` + TestStepId string `json:"testStepId,omitempty"` + Url string `json:"url,omitempty"` +} + +type Duration struct { + Seconds int64 `json:"seconds"` + Nanos int64 `json:"nanos"` +} + +type Envelope struct { + Attachment *Attachment `json:"attachment,omitempty"` + GherkinDocument *GherkinDocument `json:"gherkinDocument,omitempty"` + Hook *Hook `json:"hook,omitempty"` + Meta *Meta `json:"meta,omitempty"` + ParameterType *ParameterType `json:"parameterType,omitempty"` + ParseError *ParseError `json:"parseError,omitempty"` + Pickle *Pickle `json:"pickle,omitempty"` + Source *Source
`json:"source,omitempty"` + StepDefinition *StepDefinition `json:"stepDefinition,omitempty"` + TestCase *TestCase `json:"testCase,omitempty"` + TestCaseFinished *TestCaseFinished `json:"testCaseFinished,omitempty"` + TestCaseStarted *TestCaseStarted `json:"testCaseStarted,omitempty"` + TestRunFinished *TestRunFinished `json:"testRunFinished,omitempty"` + TestRunStarted *TestRunStarted `json:"testRunStarted,omitempty"` + TestStepFinished *TestStepFinished `json:"testStepFinished,omitempty"` + TestStepStarted *TestStepStarted `json:"testStepStarted,omitempty"` + UndefinedParameterType *UndefinedParameterType `json:"undefinedParameterType,omitempty"` +} + +type Exception struct { + Type string `json:"type"` + Message string `json:"message,omitempty"` +} + +type GherkinDocument struct { + Uri string `json:"uri,omitempty"` + Feature *Feature `json:"feature,omitempty"` + Comments []*Comment `json:"comments"` +} + +type Background struct { + Location *Location `json:"location"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Steps []*Step `json:"steps"` + Id string `json:"id"` +} + +type Comment struct { + Location *Location `json:"location"` + Text string `json:"text"` +} + +type DataTable struct { + Location *Location `json:"location"` + Rows []*TableRow `json:"rows"` +} + +type DocString struct { + Location *Location `json:"location"` + MediaType string `json:"mediaType,omitempty"` + Content string `json:"content"` + Delimiter string `json:"delimiter"` +} + +type Examples struct { + Location *Location `json:"location"` + Tags []*Tag `json:"tags"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + TableHeader *TableRow `json:"tableHeader,omitempty"` + TableBody []*TableRow `json:"tableBody"` + Id string `json:"id"` +} + +type Feature struct { + Location *Location `json:"location"` + Tags []*Tag `json:"tags"` + Language string `json:"language"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Children []*FeatureChild `json:"children"` +} + +type FeatureChild struct { + Rule *Rule `json:"rule,omitempty"` + Background *Background `json:"background,omitempty"` + Scenario *Scenario `json:"scenario,omitempty"` +} + +type Rule struct { + Location *Location `json:"location"` + Tags []*Tag `json:"tags"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Children []*RuleChild `json:"children"` + Id string `json:"id"` +} + +type RuleChild struct { + Background *Background `json:"background,omitempty"` + Scenario *Scenario `json:"scenario,omitempty"` +} + +type Scenario struct { + Location *Location `json:"location"` + Tags []*Tag `json:"tags"` + Keyword string `json:"keyword"` + Name string `json:"name"` + Description string `json:"description"` + Steps []*Step `json:"steps"` + Examples []*Examples `json:"examples"` + Id string `json:"id"` +} + +type Step struct { + Location *Location `json:"location"` + Keyword string `json:"keyword"` + KeywordType StepKeywordType `json:"keywordType,omitempty"` + Text string `json:"text"` + DocString *DocString `json:"docString,omitempty"` + DataTable *DataTable `json:"dataTable,omitempty"` + Id string `json:"id"` +} + +type TableCell struct { + Location *Location `json:"location"` + Value string `json:"value"` +} + +type TableRow struct { + Location *Location `json:"location"` + Cells []*TableCell `json:"cells"` + Id string `json:"id"` +} + +type 
Tag struct { + Location *Location `json:"location"` + Name string `json:"name"` + Id string `json:"id"` +} + +type Hook struct { + Id string `json:"id"` + Name string `json:"name,omitempty"` + SourceReference *SourceReference `json:"sourceReference"` + TagExpression string `json:"tagExpression,omitempty"` +} + +type Location struct { + Line int64 `json:"line"` + Column int64 `json:"column,omitempty"` +} + +type Meta struct { + ProtocolVersion string `json:"protocolVersion"` + Implementation *Product `json:"implementation"` + Runtime *Product `json:"runtime"` + Os *Product `json:"os"` + Cpu *Product `json:"cpu"` + Ci *Ci `json:"ci,omitempty"` +} + +type Ci struct { + Name string `json:"name"` + Url string `json:"url,omitempty"` + BuildNumber string `json:"buildNumber,omitempty"` + Git *Git `json:"git,omitempty"` +} + +type Git struct { + Remote string `json:"remote"` + Revision string `json:"revision"` + Branch string `json:"branch,omitempty"` + Tag string `json:"tag,omitempty"` +} + +type Product struct { + Name string `json:"name"` + Version string `json:"version,omitempty"` +} + +type ParameterType struct { + Name string `json:"name"` + RegularExpressions []string `json:"regularExpressions"` + PreferForRegularExpressionMatch bool `json:"preferForRegularExpressionMatch"` + UseForSnippets bool `json:"useForSnippets"` + Id string `json:"id"` +} + +type ParseError struct { + Source *SourceReference `json:"source"` + Message string `json:"message"` +} + +type Pickle struct { + Id string `json:"id"` + Uri string `json:"uri"` + Name string `json:"name"` + Language string `json:"language"` + Steps []*PickleStep `json:"steps"` + Tags []*PickleTag `json:"tags"` + AstNodeIds []string `json:"astNodeIds"` +} + +type PickleDocString struct { + MediaType string `json:"mediaType,omitempty"` + Content string `json:"content"` +} + +type PickleStep struct { + Argument *PickleStepArgument `json:"argument,omitempty"` + AstNodeIds []string `json:"astNodeIds"` + Id string `json:"id"` + Type PickleStepType `json:"type,omitempty"` + Text string `json:"text"` +} + +type PickleStepArgument struct { + DocString *PickleDocString `json:"docString,omitempty"` + DataTable *PickleTable `json:"dataTable,omitempty"` +} + +type PickleTable struct { + Rows []*PickleTableRow `json:"rows"` +} + +type PickleTableCell struct { + Value string `json:"value"` +} + +type PickleTableRow struct { + Cells []*PickleTableCell `json:"cells"` +} + +type PickleTag struct { + Name string `json:"name"` + AstNodeId string `json:"astNodeId"` +} + +type Source struct { + Uri string `json:"uri"` + Data string `json:"data"` + MediaType SourceMediaType `json:"mediaType"` +} + +type SourceReference struct { + Uri string `json:"uri,omitempty"` + JavaMethod *JavaMethod `json:"javaMethod,omitempty"` + JavaStackTraceElement *JavaStackTraceElement `json:"javaStackTraceElement,omitempty"` + Location *Location `json:"location,omitempty"` +} + +type JavaMethod struct { + ClassName string `json:"className"` + MethodName string `json:"methodName"` + MethodParameterTypes []string `json:"methodParameterTypes"` +} + +type JavaStackTraceElement struct { + ClassName string `json:"className"` + FileName string `json:"fileName"` + MethodName string `json:"methodName"` +} + +type StepDefinition struct { + Id string `json:"id"` + Pattern *StepDefinitionPattern `json:"pattern"` + SourceReference *SourceReference `json:"sourceReference"` +} + +type StepDefinitionPattern struct { + Source string `json:"source"` + Type StepDefinitionPatternType `json:"type"` +} + +type 
TestCase struct { + Id string `json:"id"` + PickleId string `json:"pickleId"` + TestSteps []*TestStep `json:"testSteps"` +} + +type Group struct { + Children []*Group `json:"children"` + Start int64 `json:"start,omitempty"` + Value string `json:"value,omitempty"` +} + +type StepMatchArgument struct { + Group *Group `json:"group"` + ParameterTypeName string `json:"parameterTypeName,omitempty"` +} + +type StepMatchArgumentsList struct { + StepMatchArguments []*StepMatchArgument `json:"stepMatchArguments"` +} + +type TestStep struct { + HookId string `json:"hookId,omitempty"` + Id string `json:"id"` + PickleStepId string `json:"pickleStepId,omitempty"` + StepDefinitionIds []string `json:"stepDefinitionIds,omitempty"` + StepMatchArgumentsLists []*StepMatchArgumentsList `json:"stepMatchArgumentsLists,omitempty"` +} + +type TestCaseFinished struct { + TestCaseStartedId string `json:"testCaseStartedId"` + Timestamp *Timestamp `json:"timestamp"` + WillBeRetried bool `json:"willBeRetried"` +} + +type TestCaseStarted struct { + Attempt int64 `json:"attempt"` + Id string `json:"id"` + TestCaseId string `json:"testCaseId"` + WorkerId string `json:"workerId,omitempty"` + Timestamp *Timestamp `json:"timestamp"` +} + +type TestRunFinished struct { + Message string `json:"message,omitempty"` + Success bool `json:"success"` + Timestamp *Timestamp `json:"timestamp"` + Exception *Exception `json:"exception,omitempty"` +} + +type TestRunStarted struct { + Timestamp *Timestamp `json:"timestamp"` +} + +type TestStepFinished struct { + TestCaseStartedId string `json:"testCaseStartedId"` + TestStepId string `json:"testStepId"` + TestStepResult *TestStepResult `json:"testStepResult"` + Timestamp *Timestamp `json:"timestamp"` +} + +type TestStepResult struct { + Duration *Duration `json:"duration"` + Message string `json:"message,omitempty"` + Status TestStepResultStatus `json:"status"` + Exception *Exception `json:"exception,omitempty"` +} + +type TestStepStarted struct { + TestCaseStartedId string `json:"testCaseStartedId"` + TestStepId string `json:"testStepId"` + Timestamp *Timestamp `json:"timestamp"` +} + +type Timestamp struct { + Seconds int64 `json:"seconds"` + Nanos int64 `json:"nanos"` +} + +type UndefinedParameterType struct { + Expression string `json:"expression"` + Name string `json:"name"` +} + +type AttachmentContentEncoding string + +const ( + AttachmentContentEncoding_IDENTITY AttachmentContentEncoding = "IDENTITY" + AttachmentContentEncoding_BASE64 AttachmentContentEncoding = "BASE64" +) + +func (e AttachmentContentEncoding) String() string { + switch e { + case AttachmentContentEncoding_IDENTITY: + return "IDENTITY" + case AttachmentContentEncoding_BASE64: + return "BASE64" + default: + panic("Bad enum value for AttachmentContentEncoding") + } +} + +type PickleStepType string + +const ( + PickleStepType_UNKNOWN PickleStepType = "Unknown" + PickleStepType_CONTEXT PickleStepType = "Context" + PickleStepType_ACTION PickleStepType = "Action" + PickleStepType_OUTCOME PickleStepType = "Outcome" +) + +func (e PickleStepType) String() string { + switch e { + case PickleStepType_UNKNOWN: + return "Unknown" + case PickleStepType_CONTEXT: + return "Context" + case PickleStepType_ACTION: + return "Action" + case PickleStepType_OUTCOME: + return "Outcome" + default: + panic("Bad enum value for PickleStepType") + } +} + +type SourceMediaType string + +const ( + SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_PLAIN SourceMediaType = "text/x.cucumber.gherkin+plain" + SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_MARKDOWN 
SourceMediaType = "text/x.cucumber.gherkin+markdown" +) + +func (e SourceMediaType) String() string { + switch e { + case SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_PLAIN: + return "text/x.cucumber.gherkin+plain" + case SourceMediaType_TEXT_X_CUCUMBER_GHERKIN_MARKDOWN: + return "text/x.cucumber.gherkin+markdown" + default: + panic("Bad enum value for SourceMediaType") + } +} + +type StepDefinitionPatternType string + +const ( + StepDefinitionPatternType_CUCUMBER_EXPRESSION StepDefinitionPatternType = "CUCUMBER_EXPRESSION" + StepDefinitionPatternType_REGULAR_EXPRESSION StepDefinitionPatternType = "REGULAR_EXPRESSION" +) + +func (e StepDefinitionPatternType) String() string { + switch e { + case StepDefinitionPatternType_CUCUMBER_EXPRESSION: + return "CUCUMBER_EXPRESSION" + case StepDefinitionPatternType_REGULAR_EXPRESSION: + return "REGULAR_EXPRESSION" + default: + panic("Bad enum value for StepDefinitionPatternType") + } +} + +type StepKeywordType string + +const ( + StepKeywordType_UNKNOWN StepKeywordType = "Unknown" + StepKeywordType_CONTEXT StepKeywordType = "Context" + StepKeywordType_ACTION StepKeywordType = "Action" + StepKeywordType_OUTCOME StepKeywordType = "Outcome" + StepKeywordType_CONJUNCTION StepKeywordType = "Conjunction" +) + +func (e StepKeywordType) String() string { + switch e { + case StepKeywordType_UNKNOWN: + return "Unknown" + case StepKeywordType_CONTEXT: + return "Context" + case StepKeywordType_ACTION: + return "Action" + case StepKeywordType_OUTCOME: + return "Outcome" + case StepKeywordType_CONJUNCTION: + return "Conjunction" + default: + panic("Bad enum value for StepKeywordType") + } +} + +type TestStepResultStatus string + +const ( + TestStepResultStatus_UNKNOWN TestStepResultStatus = "UNKNOWN" + TestStepResultStatus_PASSED TestStepResultStatus = "PASSED" + TestStepResultStatus_SKIPPED TestStepResultStatus = "SKIPPED" + TestStepResultStatus_PENDING TestStepResultStatus = "PENDING" + TestStepResultStatus_UNDEFINED TestStepResultStatus = "UNDEFINED" + TestStepResultStatus_AMBIGUOUS TestStepResultStatus = "AMBIGUOUS" + TestStepResultStatus_FAILED TestStepResultStatus = "FAILED" +) + +func (e TestStepResultStatus) String() string { + switch e { + case TestStepResultStatus_UNKNOWN: + return "UNKNOWN" + case TestStepResultStatus_PASSED: + return "PASSED" + case TestStepResultStatus_SKIPPED: + return "SKIPPED" + case TestStepResultStatus_PENDING: + return "PENDING" + case TestStepResultStatus_UNDEFINED: + return "UNDEFINED" + case TestStepResultStatus_AMBIGUOUS: + return "AMBIGUOUS" + case TestStepResultStatus_FAILED: + return "FAILED" + default: + panic("Bad enum value for TestStepResultStatus") + } +} diff --git a/vendor/github.com/cucumber/messages/go/v21/time_conversion.go b/vendor/github.com/cucumber/messages/go/v21/time_conversion.go new file mode 100644 index 000000000..3a387931e --- /dev/null +++ b/vendor/github.com/cucumber/messages/go/v21/time_conversion.go @@ -0,0 +1,34 @@ +package messages + +import "time" + +const nanosPerSecond = 1000000000 + +func DurationToGoDuration(duration Duration) time.Duration { + secondNanos := duration.Seconds * nanosPerSecond + return time.Duration(secondNanos + int64(duration.Nanos)) +} + +func GoDurationToDuration(goDuration time.Duration) Duration { + seconds := int64(goDuration / nanosPerSecond) + nanos := int64(goDuration % nanosPerSecond) + return Duration{ + Seconds: seconds, + Nanos: nanos, + } +} + +func TimestampToGoTime(timestamp Timestamp) time.Time { + return time.Unix(timestamp.Seconds, timestamp.Nanos) +} + +func 
GoTimeToTimestamp(t time.Time) Timestamp { + unixNanos := t.UnixNano() + seconds := unixNanos / nanosPerSecond + nanos := unixNanos % nanosPerSecond + + return Timestamp{ + Seconds: seconds, + Nanos: nanos, + } +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md index 734cf61e3..6d016d05c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md +++ b/vendor/github.com/cyphar/filepath-securejoin/CHANGELOG.md @@ -6,49 +6,39 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] ## -## [0.6.0] - 2025-11-03 ## +## [0.6.1] - 2025-11-19 ## -> By the Power of Greyskull! +> At last up jumped the cunning spider, and fiercely held her fast. + +### Fixed ### +- Our logic for deciding whether to use `openat2(2)` or fallback to an `O_PATH` + resolver would cache the result to avoid doing needless test runs of + `openat2(2)`. However, this causes issues when `pathrs-lite` is being used by + a program that applies new seccomp-bpf filters onto itself -- if the filter + denies `openat2(2)` then we would return that error rather than falling back + to the `O_PATH` resolver. To resolve this issue, we no longer cache the + result if `openat2(2)` was successful, only if there was an error. +- A file descriptor leak in our `openat2` wrapper (when doing the necessary + `dup` for `RESOLVE_IN_ROOT`) has been removed. -While quite small code-wise, this release marks a very key point in the -development of filepath-securejoin. - -filepath-securejoin was originally intended (back in 2017) to simply be a -single-purpose library that would take some common code used in container -runtimes (specifically, Docker's `FollowSymlinksInScope`) and make it more -general-purpose (with the eventual goals of it ending up in the Go stdlib). - -Of course, I quickly discovered that this problem was actually far more -complicated to solve when dealing with racing attackers, which lead to me -developing `openat2(2)` and [libpathrs][]. I had originally planned for -libpathrs to completely replace filepath-securejoin "once it was ready" but in -the interim we needed to fix several race attacks in runc as part of security -advisories. Obviously we couldn't require the usage of a pre-0.1 Rust library -in runc so it was necessary to port bits of libpathrs into filepath-securejoin. -(Ironically the first prototypes of libpathrs were originally written in Go and -then rewritten to Rust, so the code in filepath-securejoin is actually Go code -that was rewritten to Rust then re-rewritten to Go.) - -It then became clear that pure-Go libraries will likely not be willing to -require CGo for all of their builds, so it was necessary to accept that -filepath-securejoin will need to stay. As such, in v0.5.0 we provided more -pure-Go implementations of features from libpathrs but moved them into -`pathrs-lite` subpackage to clarify what purpose these helpers serve. - -This release finally closes the loop and makes it so that pathrs-lite can -transparently use libpathrs (via a `libpathrs` build-tag). This means that -upstream libraries can use the pure Go version if they prefer, but downstreams -(either downstream library users or even downstream distributions) are able to -migrate to libpathrs for all usages of pathrs-lite in an entire Go binary. - -I should make it clear that I do not plan to port the rest of libpathrs to Go, -as I do not wish to maintain two copies of the same codebase. 
pathrs-lite -already provides the core essentials necessary to operate on paths safely for -most modern systems. Users who want additional hardening or more ergonomic APIs -are free to use [`cyphar.com/go-pathrs`][go-pathrs] (libpathrs's Go bindings). +## [0.5.2] - 2025-11-19 ## -[libpathrs]: https://github.com/cyphar/libpathrs -[go-pathrs]: https://cyphar.com/go-pathrs +> "Will you walk into my parlour?" said a spider to a fly. + +### Fixed ### +- Our logic for deciding whether to use `openat2(2)` or fallback to an `O_PATH` + resolver would cache the result to avoid doing needless test runs of + `openat2(2)`. However, this causes issues when `pathrs-lite` is being used by + a program that applies new seccomp-bpf filters onto itself -- if the filter + denies `openat2(2)` then we would return that error rather than falling back + to the `O_PATH` resolver. To resolve this issue, we no longer cache the + result if `openat2(2)` was successful, only if there was an error. +- A file descriptor leak in our `openat2` wrapper (when doing the necessary + `dup` for `RESOLVE_IN_ROOT`) has been removed. + +## [0.6.0] - 2025-11-03 ## + +> By the Power of Greyskull! ### Breaking ### - The deprecated `MkdirAll`, `MkdirAllHandle`, `OpenInRoot`, `OpenatInRoot` and @@ -56,12 +46,12 @@ are free to use [`cyphar.com/go-pathrs`][go-pathrs] (libpathrs's Go bindings). directly. ### Added ### -- `pathrs-lite` now has support for using [libpathrs][libpathrs] as a backend. - This is opt-in and can be enabled at build time with the `libpathrs` build - tag. The intention is to allow for downstream libraries and other projects to - make use of the pure-Go `github.com/cyphar/filepath-securejoin/pathrs-lite` - package and distributors can then opt-in to using `libpathrs` for the entire - binary if they wish. +- `pathrs-lite` now has support for using libpathrs as a backend. This is + opt-in and can be enabled at build time with the `libpathrs` build tag. The + intention is to allow for downstream libraries and other projects to make use + of the pure-Go `github.com/cyphar/filepath-securejoin/pathrs-lite` package + and distributors can then opt-in to using `libpathrs` for the entire binary + if they wish. ## [0.5.1] - 2025-10-31 ## @@ -440,8 +430,10 @@ This is our first release of `github.com/cyphar/filepath-securejoin`, containing a full implementation with a coverage of 93.5% (the only missing cases are the error cases, which are hard to mocktest at the moment). 
-[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.6.0...HEAD -[0.6.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.1...v0.6.0 +[Unreleased]: https://github.com/cyphar/filepath-securejoin/compare/v0.6.1...HEAD +[0.6.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.6.0...v0.6.1 +[0.6.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...v0.6.0 +[0.5.2]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.1...v0.5.2 [0.5.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.5.0...v0.5.1 [0.5.0]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.1...v0.5.0 [0.4.1]: https://github.com/cyphar/filepath-securejoin/compare/v0.4.0...v0.4.1 diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index a918a2aa1..ee6cdce3c 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.6.0 +0.6.1 diff --git a/vendor/github.com/go-openapi/jsonpointer/.cliff.toml b/vendor/github.com/go-openapi/jsonpointer/.cliff.toml new file mode 100644 index 000000000..ae70028b7 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/.cliff.toml @@ -0,0 +1,181 @@ +# git-cliff ~ configuration file +# https://git-cliff.org/docs/configuration + +[changelog] +header = """ +""" + +footer = """ + +----- + +**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms** + +[![License][license-badge]][license-url] + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme + +{%- macro remote_url() -%} + https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} +{%- endmacro -%} +""" + +body = """ +{%- if version %} +## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} +{%- else %} +## [unreleased] +{%- endif %} +{%- if message %} + {%- raw %}\n{% endraw %} +{{ message }} + {%- raw %}\n{% endraw %} +{%- endif %} +{%- if version %} + {%- if previous.version %} + +**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}> + {%- endif %} +{%- else %} + {%- raw %}\n{% endraw %} +{%- endif %} + +{%- if statistics %}{% if statistics.commit_count %} + {%- raw %}\n{% endraw %} +{{ statistics.commit_count }} commits in this release. 
+ {%- raw %}\n{% endraw %} +{%- endif %}{% endif %} +----- + +{%- for group, commits in commits | group_by(attribute="group") %} + {%- raw %}\n{% endraw %} +### {{ group | upper_first }} + {%- raw %}\n{% endraw %} + {%- for commit in commits %} + {%- if commit.remote.pr_title %} + {%- set commit_message = commit.remote.pr_title %} + {%- else %} + {%- set commit_message = commit.message %} + {%- endif %} +* {{ commit_message | split(pat="\n") | first | trim }} + {%- if commit.remote.username %} +{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }}) + {%- endif %} + {%- if commit.remote.pr_number %} +{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}) + {%- endif %} +{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }}) + {%- endfor %} +{%- endfor %} + +{%- if github %} +{%- raw %}\n{% endraw -%} + {%- set all_contributors = github.contributors | length %} + {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %} +----- + +### People who contributed to this release + {% endif %} + {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %} + {%- if contributor.username != "dependabot[bot]" %} +* [@{{ contributor.username }}](https://github.com/{{ contributor.username }}) + {%- endif %} + {%- endfor %} + + {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} +----- + {%- raw %}\n{% endraw %} + +### New Contributors + {%- endif %} + + {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} + {%- if contributor.username != "dependabot[bot]" %} +* @{{ contributor.username }} made their first contribution + {%- if contributor.pr_number %} + in [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ + {%- endif %} + {%- endif %} + {%- endfor %} +{%- endif %} + +{%- raw %}\n{% endraw %} + +{%- macro remote_url() -%} + https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} +{%- endmacro -%} +""" +# Remove leading and trailing whitespaces from the changelog's body. +trim = true +# Render body even when there are no releases to process. +render_always = true +# An array of regex based postprocessors to modify the changelog. +postprocessors = [ + # Replace the placeholder with a URL. + #{ pattern = '', replace = "https://github.com/orhun/git-cliff" }, +] +# output file path +# output = "test.md" + +[git] +# Parse commits according to the conventional commits specification. +# See https://www.conventionalcommits.org +conventional_commits = false +# Exclude commits that do not match the conventional commits specification. +filter_unconventional = false +# Require all commits to be conventional. +# Takes precedence over filter_unconventional. +require_conventional = false +# Split commits on newlines, treating each line as an individual commit. +split_commits = false +# An array of regex based parsers to modify commit messages prior to further processing. +commit_preprocessors = [ + # Replace issue numbers with link templates to be updated in `changelog.postprocessors`. + #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"}, + # Check spelling of the commit message using https://github.com/crate-ci/typos. + # If the spelling is incorrect, it will be fixed automatically. 
+ #{ pattern = '.*', replace_command = 'typos --write-changes -' } +] +# Prevent commits that are breaking from being excluded by commit parsers. +protect_breaking_commits = false +# An array of regex based parsers for extracting data from the commit message. +# Assigns commits to groups. +# Optionally sets the commit's scope and can decide to exclude commits from further processing. +commit_parsers = [ + { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true }, + { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true }, + { field = "author.name", pattern = "dependabot*", group = "Updates" }, + { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" }, + { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" }, + { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" }, + { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" }, + { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" }, + { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" }, + { message = "^test", group = "Testing" }, + { message = "(^fix)|(panic)", group = "Fixed bugs" }, + { message = "(^refact)|(rework)", group = "Refactor" }, + { message = "(^[Pp]erf)|(performance)", group = "Performance" }, + { message = "(^[Cc]hore)", group = "Miscellaneous tasks" }, + { message = "^[Rr]evert", group = "Reverted changes" }, + { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" }, + { message = ".*", group = "Other" }, +] +# Exclude commits that are not matched by any commit parser. +filter_commits = false +# An array of link parsers for extracting external references, and turning them into URLs, using regex. +link_parsers = [] +# Include only the tags that belong to the current branch. +use_branch_tags = false +# Order releases topologically instead of chronologically. +topo_order = false +# Order releases topologically instead of chronologically. +topo_order_commits = true +# Order of commits in each group/release within the changelog. 
+# Allowed values: newest, oldest +sort_commits = "newest" +# Process submodules commits +recurse_submodules = false + +#[remote.github] +#owner = "go-openapi" diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore index 769c24400..59cd29489 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.gitignore +++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore @@ -1 +1,4 @@ -secrets.yml +*.out +*.cov +.idea +.env diff --git a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml index 568ce16d7..fdae591bc 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.golangci.yml @@ -2,34 +2,17 @@ version: "2" linters: default: all disable: - - cyclop - depguard - - errchkjson - - errorlint - - exhaustruct - - forcetypeassert - funlen - - gochecknoglobals - - gochecknoinits - - gocognit - - godot - godox - - gosmopolitan - - inamedparam - - intrange # disabled while < go1.22 - - ireturn - - lll - - musttag - - nestif + - exhaustruct - nlreturn - nonamedreturns - noinlineerr - paralleltest - recvcheck - testpackage - - thelper - tparallel - - unparam - varnamelen - whitespace - wrapcheck @@ -41,8 +24,15 @@ linters: goconst: min-len: 2 min-occurrences: 3 + cyclop: + max-complexity: 20 gocyclo: - min-complexity: 45 + min-complexity: 20 + exhaustive: + default-signifies-exhaustive: true + default-case-required: true + lll: + line-length: 180 exclusions: generated: lax presets: @@ -58,6 +48,7 @@ formatters: enable: - gofmt - goimports + - gofumpt exclusions: generated: lax paths: diff --git a/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md b/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md new file mode 100644 index 000000000..aace4fcfb --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md @@ -0,0 +1,24 @@ +# Contributors + +- Repository: ['go-openapi/jsonpointer'] + +| Total Contributors | Total Contributions | +| --- | --- | +| 12 | 90 | + +| Username | All Time Contribution Count | All Commits | +| --- | --- | --- | +| @fredbi | 43 | https://github.com/go-openapi/jsonpointer/commits?author=fredbi | +| @casualjim | 33 | https://github.com/go-openapi/jsonpointer/commits?author=casualjim | +| @magodo | 3 | https://github.com/go-openapi/jsonpointer/commits?author=magodo | +| @youyuanwu | 3 | https://github.com/go-openapi/jsonpointer/commits?author=youyuanwu | +| @gaiaz-iusipov | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gaiaz-iusipov | +| @gbjk | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gbjk | +| @gordallott | 1 | https://github.com/go-openapi/jsonpointer/commits?author=gordallott | +| @ianlancetaylor | 1 | https://github.com/go-openapi/jsonpointer/commits?author=ianlancetaylor | +| @mfleader | 1 | https://github.com/go-openapi/jsonpointer/commits?author=mfleader | +| @Neo2308 | 1 | https://github.com/go-openapi/jsonpointer/commits?author=Neo2308 | +| @olivierlemasle | 1 | https://github.com/go-openapi/jsonpointer/commits?author=olivierlemasle | +| @testwill | 1 | https://github.com/go-openapi/jsonpointer/commits?author=testwill | + + _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_ diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE index d64569567..261eeb9e9 100644 --- a/vendor/github.com/go-openapi/jsonpointer/LICENSE +++ 
b/vendor/github.com/go-openapi/jsonpointer/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/github.com/go-openapi/jsonpointer/NOTICE b/vendor/github.com/go-openapi/jsonpointer/NOTICE new file mode 100644 index 000000000..f3b51939a --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/NOTICE @@ -0,0 +1,39 @@ +Copyright 2015-2025 go-swagger maintainers + +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +This software library, github.com/go-openapi/jsonpointer, includes software developed +by the go-swagger and go-openapi maintainers ("go-swagger maintainers"). + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this software except in compliance with the License. + +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0. + +This software is copied from, derived from, and inspired by other original software products. +It ships with copies of other software whose license terms are recalled below. + +The original software was authored on 25-02-2013 by sigu-399 (https://github.com/sigu-399, sigu.399@gmail.com). + +github.com/sigu-399/jsonpointer +=========================== + +// SPDX-FileCopyrightText: Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// SPDX-License-Identifier: Apache-2.0 + +Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md index 0108f1d57..00cbfd741 100644 --- a/vendor/github.com/go-openapi/jsonpointer/README.md +++ b/vendor/github.com/go-openapi/jsonpointer/README.md @@ -1,19 +1,138 @@ -# gojsonpointer [![Build Status](https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonpointer/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) +# jsonpointer -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg)](https://pkg.go.dev/github.com/go-openapi/jsonpointer) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonpointer)](https://goreportcard.com/report/github.com/go-openapi/jsonpointer) + +[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url] + + + +[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url] + + + +[![GoDoc][godoc-badge]][godoc-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge] -An implementation of JSON Pointer - Go language +--- + +An implementation of JSON Pointer for Go, with support for Go `struct` types. ## Status -Completed YES -Tested YES +API is stable. + +## Import this library in your project + +```cmd +go get github.com/go-openapi/jsonpointer +``` + +## Basic usage + +See also some [examples](./examples_test.go) + +### Retrieving a value + +```go + import ( + "github.com/go-openapi/jsonpointer" + ) + + + var doc any + + ... + + pointer, err := jsonpointer.New("/foo/1") + if err != nil { + ... // error: e.g. invalid JSON pointer specification + } + + value, kind, err := pointer.Get(doc) + if err != nil { + ... // error: e.g. key not found, index out of bounds, etc. + } + + ... +``` + +### Setting a value + +```go + ... + var doc any + ... + pointer, err := jsonpointer.New("/foo/1") + if err != nil { + ... // error: e.g. invalid JSON pointer specification + } + + doc, err = pointer.Set(doc, "value") + if err != nil { + ... // error: e.g. key not found, index out of bounds, etc. + } +``` + +## Change log + +See <https://github.com/go-openapi/jsonpointer/releases> ## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 -### Note -The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. + +<http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07> +also known as [RFC6901](https://www.rfc-editor.org/rfc/rfc6901) + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). + +See the license [NOTICE](./NOTICE), which recalls the licensing terms of all the pieces of software +on top of which it has been built. + +## Limitations + +The "4. Evaluation" part of the previous reference, starting with 'If the currently referenced value is a JSON array, +the reference token MUST contain either...' is not implemented.
+ +That is because our implementation of the JSON pointer only supports explicit references to array elements: +the provision in the spec to address the (nonexistent) member after the last array element, +using the special trailing character "-", is not implemented. + +## Other documentation + +* [All-time contributors](./CONTRIBUTORS.md) +* [Contributing guidelines](.github/CONTRIBUTING.md) +* [Maintainers documentation](docs/MAINTAINERS.md) +* [Code style](docs/STYLE.md) + + +[test-badge]: https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg +[test-url]: https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml +[cov-badge]: https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg +[cov-url]: https://codecov.io/gh/go-openapi/jsonpointer +[vuln-scan-badge]: https://github.com/go-openapi/jsonpointer/actions/workflows/scanner.yml/badge.svg +[vuln-scan-url]: https://github.com/go-openapi/jsonpointer/actions/workflows/scanner.yml +[codeql-badge]: https://github.com/go-openapi/jsonpointer/actions/workflows/codeql.yml/badge.svg +[codeql-url]: https://github.com/go-openapi/jsonpointer/actions/workflows/codeql.yml + +[release-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer.svg +[release-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer + +[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/jsonpointer +[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/jsonpointer +[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/jsonpointer +[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/jsonpointer + +[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F +[doc-url]: https://goswagger.io/go-openapi +[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer.svg +[godoc-url]: http://pkg.go.dev/github.com/go-openapi/jsonpointer +[slack-badge]: https://slackin.goswagger.io/badge.svg +[slack-url]: https://slackin.goswagger.io + +[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg +[license-url]: https://github.com/go-openapi/jsonpointer/?tab=Apache-2.0-1-ov-file#readme + +[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/jsonpointer +[goversion-url]: https://github.com/go-openapi/jsonpointer/blob/master/go.mod +[top-badge]: https://img.shields.io/github/languages/top/go-openapi/jsonpointer +[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/jsonpointer/latest diff --git a/vendor/github.com/go-openapi/jsonpointer/SECURITY.md b/vendor/github.com/go-openapi/jsonpointer/SECURITY.md new file mode 100644 index 000000000..2a7b6f091 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonpointer/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +This policy outlines the commitment and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.22.x | :white_check_mark: | + +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +please report it privately to the maintainers.
+ +Please follow the instructions provided by GitHub to +[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). + +TL;DR: on GitHub, navigate to the project's "Security" tab then click on "Report a vulnerability". diff --git a/vendor/github.com/go-openapi/jsonpointer/errors.go b/vendor/github.com/go-openapi/jsonpointer/errors.go index b84343d9d..8c50dde8b 100644 --- a/vendor/github.com/go-openapi/jsonpointer/errors.go +++ b/vendor/github.com/go-openapi/jsonpointer/errors.go @@ -1,5 +1,10 @@ +// SPDX-FileCopyrightText: Copyright (c) 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + package jsonpointer +import "fmt" + type pointerError string func (e pointerError) Error() string { @@ -7,12 +12,24 @@ func (e pointerError) Error() string { } const ( - // ErrPointer is an error raised by the jsonpointer package + // ErrPointer is a sentinel error raised by all errors from this package. ErrPointer pointerError = "JSON pointer error" - // ErrInvalidStart states that a JSON pointer must start with a separator ("/") + // ErrInvalidStart states that a JSON pointer must start with a separator ("/"). ErrInvalidStart pointerError = `JSON pointer must be empty or start with a "` + pointerSeparator - // ErrUnsupportedValueType indicates that a value of the wrong type is being set + // ErrUnsupportedValueType indicates that a value of the wrong type is being set. ErrUnsupportedValueType pointerError = "only structs, pointers, maps and slices are supported for setting values" ) + +func errNoKey(key string) error { + return fmt.Errorf("object has no key %q: %w", key, ErrPointer) +} + +func errOutOfBounds(length, idx int) error { + return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", length-1, idx, ErrPointer) +} + +func errInvalidReference(token string) error { + return fmt.Errorf("invalid token reference %q: %w", token, ErrPointer) +} diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index afc8a7840..7df49af3b 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -1,28 +1,7 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Main and unique file. -// -// created 25-02-2013 +// SPDX-FileCopyrightText: Copyright (c) 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 +// Package jsonpointer provides a golang implementation for json pointers.
package jsonpointer import ( @@ -41,70 +20,93 @@ const ( pointerSeparator = `/` ) -var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() -var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() - -// JSONPointable is an interface for structs to implement when they need to customize the -// json pointer process +// JSONPointable is an interface for structs to implement, +// when they need to customize the json pointer process or want to avoid the use of reflection. type JSONPointable interface { - JSONLookup(string) (any, error) + // JSONLookup returns a value pointed at this (unescaped) key. + JSONLookup(key string) (any, error) } -// JSONSetable is an interface for structs to implement when they need to customize the -// json pointer process +// JSONSetable is an interface for structs to implement, +// when they need to customize the json pointer process or want to avoid the use of reflection. type JSONSetable interface { - JSONSet(string, any) error + // JSONSet sets the value pointed at the (unescaped) key. + JSONSet(key string, value any) error } -// Pointer the json pointer reprsentation +// Pointer is a representation of a json pointer. +// +// Use [Pointer.Get] to retrieve a value or [Pointer.Set] to set a value. +// +// It works with any go type interpreted as a JSON document, which means: +// +// - if a type implements [JSONPointable], its [JSONPointable.JSONLookup] method is used to resolve [Pointer.Get] +// - if a type implements [JSONSetable], its [JSONSetable.JSONSet] method is used to resolve [Pointer.Set] +// - a go map[K]V is interpreted as an object, with type K assignable to a string +// - a go slice []T is interpreted as an array +// - a go struct is interpreted as an object, with exported fields interpreted as keys +// - promoted fields from an embedded struct are traversed +// - scalars (e.g. int, float64 ...), channels, functions and go arrays cannot be traversed +// +// For structs resolved by reflection, key mappings honor the conventional struct tag `json`. +// +// Fields that do not specify a `json` tag, or specify an empty one, or are tagged as `json:"-"` are ignored. +// +// # Limitations +// +// - Unlike go standard marshaling, untagged fields do not default to the go field name and are ignored. +// - Anonymous fields are not traversed if untagged type Pointer struct { referenceTokens []string } -// New creates a new json pointer for the given string +// New creates a new json pointer from its string representation. func New(jsonPointerString string) (Pointer, error) { - var p Pointer err := p.parse(jsonPointerString) - return p, err + return p, err } -// Get uses the pointer to retrieve a value from a JSON document +// Get uses the pointer to retrieve a value from a JSON document. +// +// It returns the value with its type as a [reflect.Kind] or an error. func (p *Pointer) Get(document any) (any, reflect.Kind, error) { return p.get(document, jsonname.DefaultJSONNameProvider) } -// Set uses the pointer to set a value from a JSON document +// Set uses the pointer to set a value from a data type +// that represents a JSON document. +// +// It returns the updated document. func (p *Pointer) Set(document any, value any) (any, error) { return document, p.set(document, value, jsonname.DefaultJSONNameProvider) } -// DecodedTokens returns the decoded tokens +// DecodedTokens returns the decoded (unescaped) tokens of this JSON pointer.
func (p *Pointer) DecodedTokens() []string { result := make([]string, 0, len(p.referenceTokens)) - for _, t := range p.referenceTokens { - result = append(result, Unescape(t)) + for _, token := range p.referenceTokens { + result = append(result, Unescape(token)) } + return result } -// IsEmpty returns true if this is an empty json pointer -// this indicates that it points to the root document +// IsEmpty returns true if this is an empty json pointer. +// +// This indicates that it points to the root document. func (p *Pointer) IsEmpty() bool { return len(p.referenceTokens) == 0 } -// Pointer to string representation function +// String representation of a pointer. func (p *Pointer) String() string { - if len(p.referenceTokens) == 0 { return emptyPointer } - pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) - - return pointerString + return pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) } func (p *Pointer) Offset(document string) (int64, error) { @@ -138,20 +140,21 @@ func (p *Pointer) Offset(document string) (int64, error) { return offset, nil } -// "Constructor", parses the given string JSON pointer +// "Constructor", parses the given string JSON pointer. func (p *Pointer) parse(jsonPointerString string) error { - var err error - - if jsonPointerString != emptyPointer { - if !strings.HasPrefix(jsonPointerString, pointerSeparator) { - err = errors.Join(ErrInvalidStart, ErrPointer) - } else { - referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) - } + if jsonPointerString == emptyPointer { + return nil } - return err + if !strings.HasPrefix(jsonPointerString, pointerSeparator) { + // non empty pointer must start with "/" + return errors.Join(ErrInvalidStart, ErrPointer) + } + + referenceTokens := strings.Split(jsonPointerString, pointerSeparator) + p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...) + + return nil } func (p *Pointer) get(node any, nameProvider *jsonname.NameProvider) (any, reflect.Kind, error) { @@ -161,7 +164,7 @@ func (p *Pointer) get(node any, nameProvider *jsonname.NameProvider) (any, refle kind := reflect.Invalid - // Full document when empty + // full document when empty if len(p.referenceTokens) == 0 { return node, kind, nil } @@ -185,103 +188,105 @@ func (p *Pointer) get(node any, nameProvider *jsonname.NameProvider) (any, refle func (p *Pointer) set(node, data any, nameProvider *jsonname.NameProvider) error { knd := reflect.ValueOf(node).Kind() - if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { + if knd != reflect.Pointer && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { return errors.Join( + fmt.Errorf("unexpected type: %T", node), //nolint:err113 // err wrapping is carried out by errors.Join, not fmt.Errorf. 
ErrUnsupportedValueType, ErrPointer, ) } + l := len(p.referenceTokens) + + // full document when empty + if l == 0 { + return nil + } + if nameProvider == nil { nameProvider = jsonname.DefaultJSONNameProvider } - // Full document when empty - if len(p.referenceTokens) == 0 { - return nil + var decodedToken string + lastIndex := l - 1 + + if lastIndex > 0 { // skip if we only have one token in pointer + for _, token := range p.referenceTokens[:lastIndex] { + decodedToken = Unescape(token) + next, err := p.resolveNodeForToken(node, decodedToken, nameProvider) + if err != nil { + return err + } + + node = next + } } - lastI := len(p.referenceTokens) - 1 - for i, token := range p.referenceTokens { - isLastToken := i == lastI - decodedToken := Unescape(token) + // last token + decodedToken = Unescape(p.referenceTokens[lastIndex]) + + return setSingleImpl(node, data, decodedToken, nameProvider) +} - if isLastToken { +func (p *Pointer) resolveNodeForToken(node any, decodedToken string, nameProvider *jsonname.NameProvider) (next any, err error) { + // check for nil during traversal + if isNil(node) { + return nil, fmt.Errorf("cannot traverse through nil value at %q: %w", decodedToken, ErrPointer) + } - return setSingleImpl(node, data, decodedToken, nameProvider) + pointable, ok := node.(JSONPointable) + if ok { + r, err := pointable.JSONLookup(decodedToken) + if err != nil { + return nil, err } - // Check for nil during traversal - if isNil(node) { - return fmt.Errorf("cannot traverse through nil value at %q: %w", decodedToken, ErrPointer) + fld := reflect.ValueOf(r) + if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Pointer { + return fld.Addr().Interface(), nil } - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind := rValue.Kind() + return r, nil + } - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return err - } - fld := reflect.ValueOf(r) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = r - continue + rValue := reflect.Indirect(reflect.ValueOf(node)) + kind := rValue.Kind() + + switch kind { + case reflect.Struct: + nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) + if !ok { + return nil, fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) } - switch kind { //nolint:exhaustive - case reflect.Struct: - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) - } - fld := rValue.FieldByName(nm) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = fld.Interface() + return typeFromValue(rValue.FieldByName(nm)), nil - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) + case reflect.Map: + kv := reflect.ValueOf(decodedToken) + mv := rValue.MapIndex(kv) - if !mv.IsValid() { - return fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer) - } - if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { - node = mv.Addr().Interface() - continue - } - node = mv.Interface() + if 
!mv.IsValid() { + return nil, errNoKey(decodedToken) + } - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer) - } + return typeFromValue(mv), nil - elem := rValue.Index(tokenIndex) - if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { - node = elem.Addr().Interface() - continue - } - node = elem.Interface() + case reflect.Slice: + tokenIndex, err := strconv.Atoi(decodedToken) + if err != nil { + return nil, errors.Join(err, ErrPointer) + } - default: - return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) + sLength := rValue.Len() + if tokenIndex < 0 || tokenIndex >= sLength { + return nil, errOutOfBounds(sLength, tokenIndex) } - } - return nil + return typeFromValue(rValue.Index(tokenIndex)), nil + + default: + return nil, errInvalidReference(decodedToken) + } } func isNil(input any) bool { @@ -290,20 +295,28 @@ func isNil(input any) bool { } kind := reflect.TypeOf(input).Kind() - switch kind { //nolint:exhaustive - case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: + switch kind { + case reflect.Pointer, reflect.Map, reflect.Slice, reflect.Chan: return reflect.ValueOf(input).IsNil() default: return false } } -// GetForToken gets a value for a json pointer token 1 level deep +func typeFromValue(v reflect.Value) any { + if v.CanAddr() && v.Kind() != reflect.Interface && v.Kind() != reflect.Map && v.Kind() != reflect.Slice && v.Kind() != reflect.Pointer { + return v.Addr().Interface() + } + + return v.Interface() +} + +// GetForToken gets a value for a json pointer token 1 level deep. func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) { return getSingleImpl(document, decodedToken, jsonname.DefaultJSONNameProvider) } -// SetForToken gets a value for a json pointer token 1 level deep +// SetForToken sets a value for a json pointer token 1 level deep. 
func SetForToken(document any, decodedToken string, value any) (any, error) { return document, setSingleImpl(document, value, decodedToken, jsonname.DefaultJSONNameProvider) } @@ -326,13 +339,15 @@ func getSingleImpl(node any, decodedToken string, nameProvider *jsonname.NamePro return getSingleImpl(*typed, decodedToken, nameProvider) } - switch kind { //nolint:exhaustive + switch kind { case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { return nil, kind, fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) } + fld := rValue.FieldByName(nm) + return fld.Interface(), kind, nil case reflect.Map: @@ -342,78 +357,99 @@ func getSingleImpl(node any, decodedToken string, nameProvider *jsonname.NamePro if mv.IsValid() { return mv.Interface(), kind, nil } - return nil, kind, fmt.Errorf("object has no key %q: %w", decodedToken, ErrPointer) + + return nil, kind, errNoKey(decodedToken) case reflect.Slice: tokenIndex, err := strconv.Atoi(decodedToken) if err != nil { - return nil, kind, err + return nil, kind, errors.Join(err, ErrPointer) } sLength := rValue.Len() if tokenIndex < 0 || tokenIndex >= sLength { - return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength-1, tokenIndex, ErrPointer) + return nil, kind, errOutOfBounds(sLength, tokenIndex) } elem := rValue.Index(tokenIndex) return elem.Interface(), kind, nil default: - return nil, kind, fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) + return nil, kind, errInvalidReference(decodedToken) } } func setSingleImpl(node, data any, decodedToken string, nameProvider *jsonname.NameProvider) error { - rValue := reflect.Indirect(reflect.ValueOf(node)) - - // Check for nil to prevent panic when calling rValue.Type() + // check for nil to prevent panic when calling rValue.Type() if isNil(node) { return fmt.Errorf("cannot set field %q on nil value: %w", decodedToken, ErrPointer) } - if ns, ok := node.(JSONSetable); ok { // pointer impl + if ns, ok := node.(JSONSetable); ok { return ns.JSONSet(decodedToken, data) } - if rValue.Type().Implements(jsonSetableType) { - return node.(JSONSetable).JSONSet(decodedToken, data) - } + rValue := reflect.Indirect(reflect.ValueOf(node)) - switch rValue.Kind() { //nolint:exhaustive + switch rValue.Kind() { case reflect.Struct: nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) if !ok { return fmt.Errorf("object has no field %q: %w", decodedToken, ErrPointer) } + fld := rValue.FieldByName(nm) - if fld.IsValid() { - fld.Set(reflect.ValueOf(data)) + if !fld.CanSet() { + return fmt.Errorf("can't set struct field %s to %v: %w", nm, data, ErrPointer) } + + value := reflect.ValueOf(data) + valueType := value.Type() + assignedType := fld.Type() + + if !valueType.AssignableTo(assignedType) { + return fmt.Errorf("can't set value with type %T to field %s with type %v: %w", data, nm, assignedType, ErrPointer) + } + + fld.Set(value) + return nil case reflect.Map: kv := reflect.ValueOf(decodedToken) rValue.SetMapIndex(kv, reflect.ValueOf(data)) + return nil case reflect.Slice: tokenIndex, err := strconv.Atoi(decodedToken) if err != nil { - return err + return errors.Join(err, ErrPointer) } + sLength := rValue.Len() if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d': %w", sLength, tokenIndex, ErrPointer) + return errOutOfBounds(sLength, tokenIndex) } elem := rValue.Index(tokenIndex) if !elem.CanSet() { return fmt.Errorf("can't set slice index %s to %v: 
%w", decodedToken, data, ErrPointer) } - elem.Set(reflect.ValueOf(data)) + + value := reflect.ValueOf(data) + valueType := value.Type() + assignedType := elem.Type() + + if !valueType.AssignableTo(assignedType) { + return fmt.Errorf("can't set value with type %T to slice element %d with type %v: %w", data, tokenIndex, assignedType, ErrPointer) + } + + elem.Set(value) + return nil default: - return fmt.Errorf("invalid token reference %q: %w", decodedToken, ErrPointer) + return errInvalidReference(decodedToken) } } @@ -444,13 +480,14 @@ func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) { return 0, fmt.Errorf("invalid token %#v: %w", tk, ErrPointer) } } + return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer) } func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { idx, err := strconv.Atoi(decodedToken) if err != nil { - return 0, fmt.Errorf("token reference %q is not a number: %v: %w", decodedToken, err, ErrPointer) + return 0, fmt.Errorf("token reference %q is not a number: %w: %w", decodedToken, err, ErrPointer) } var i int for i = 0; i < idx && dec.More(); i++ { @@ -476,10 +513,12 @@ func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) { if !dec.More() { return 0, fmt.Errorf("token reference %q not found: %w", decodedToken, ErrPointer) } + return dec.InputOffset(), nil } // drainSingle drains a single level of object or array. +// // The decoder has to guarantee the beginning delim (i.e. '{' or '[') has been consumed. func drainSingle(dec *json.Decoder) error { for dec.More() { @@ -501,14 +540,15 @@ func drainSingle(dec *json.Decoder) error { } } - // Consumes the ending delim + // consumes the ending delim if _, err := dec.Token(); err != nil { return err } + return nil } -// Specific JSON pointer encoding here +// JSON pointer encoding: // ~0 => ~ // ~1 => / // ... and vice versa @@ -520,16 +560,24 @@ const ( decRefTok1 = `/` ) -// Unescape unescapes a json pointer reference token string to the original representation +var ( + encRefTokReplacer = strings.NewReplacer(encRefTok1, decRefTok1, encRefTok0, decRefTok0) //nolint:gochecknoglobals // it's okay to declare a replacer as a private global + decRefTokReplacer = strings.NewReplacer(decRefTok1, encRefTok1, decRefTok0, encRefTok0) //nolint:gochecknoglobals // it's okay to declare a replacer as a private global +) + +// Unescape unescapes a json pointer reference token string to the original representation. func Unescape(token string) string { - step1 := strings.ReplaceAll(token, encRefTok1, decRefTok1) - step2 := strings.ReplaceAll(step1, encRefTok0, decRefTok0) - return step2 + return encRefTokReplacer.Replace(token) } -// Escape escapes a pointer reference token string +// Escape escapes a pointer reference token string. +// +// The JSONPointer specification defines "/" as a separator and "~" as an escape prefix. 
+// +// Keys containing such characters are escaped with the following rules: +// +// - "~" is escaped as "~0" +// - "/" is escaped as "~1" func Escape(token string) string { - step1 := strings.ReplaceAll(token, decRefTok0, encRefTok0) - step2 := strings.ReplaceAll(step1, decRefTok1, encRefTok1) - return step2 + return decRefTokReplacer.Replace(token) }
diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index 568ce16d7..7cea1af8b 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -16,7 +16,7 @@ linters: - godox - gosmopolitan - inamedparam - - intrange # disabled while < go1.22 + #- intrange # disabled while < go1.22 - ireturn - lll - musttag
diff --git a/vendor/github.com/go-openapi/jsonreference/NOTICE b/vendor/github.com/go-openapi/jsonreference/NOTICE new file mode 100644 index 000000000..f9ad7e0f7 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/NOTICE @@ -0,0 +1,36 @@ +Copyright 2015-2025 go-swagger maintainers + +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +This software library, github.com/go-openapi/jsonreference, includes software developed +by the go-swagger and go-openapi maintainers ("go-swagger maintainers"). + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this software except in compliance with the License. +You may obtain a copy of the License at + +This software is copied from, derived from, and inspired by other original software products. +It ships with copies of other software whose license terms are recalled below. + +The original software was authored on 25-02-2013 by sigu-399 (https://github.com/sigu-399, sigu.399@gmail.com). + +github.com/sigu-399/jsonreference +=========================== + +// SPDX-FileCopyrightText: Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) +// SPDX-License-Identifier: Apache-2.0 + +Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md index c7fc2049c..2274a4b78 100644 --- a/vendor/github.com/go-openapi/jsonreference/README.md +++ b/vendor/github.com/go-openapi/jsonreference/README.md @@ -17,3 +17,10 @@ Feature complete. Stable API * http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 * http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 + +## Licensing + +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). + +See the license [NOTICE](./NOTICE), which recalls the licensing terms of all the pieces of software +on top of which it has been built.
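Editor's note: the ~0/~1 escaping rules documented in the jsonpointer hunk above can be exercised directly. A minimal sketch, assuming the exported `Escape`/`Unescape` helpers of `github.com/go-openapi/jsonpointer` exactly as they appear in the diff:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonpointer"
)

func main() {
	// "/" separates reference tokens and "~" starts an escape sequence,
	// so both must be escaped inside a single token.
	token := "a/b~c"

	escaped := jsonpointer.Escape(token)
	fmt.Println(escaped) // a~1b~0c

	// Unescape restores the original token.
	fmt.Println(jsonpointer.Unescape(escaped) == token) // true
}
```

The move from two `strings.ReplaceAll` passes to a package-level `strings.NewReplacer` keeps the same semantics (the replacer substitutes all pairs in a single pass) while avoiding one intermediate allocation per call.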
diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go index f0610cf1e..ca79391dc 100644 --- a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -1,3 +1,6 @@ +// SPDX-FileCopyrightText: Copyright (c) 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + package internal import ( diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go index 6a1fed5df..33d4798ca 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -1,27 +1,5 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Main and unique file. -// -// created 26-02-2013 +// SPDX-FileCopyrightText: Copyright (c) 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package jsonreference diff --git a/vendor/github.com/go-openapi/swag/.codecov.yml b/vendor/github.com/go-openapi/swag/.codecov.yml new file mode 100644 index 000000000..3354f44b2 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.codecov.yml @@ -0,0 +1,4 @@ +ignore: + - jsonutils/fixtures_test + - jsonutils/adapters/ifaces/mocks + - jsonutils/adapters/testintegration/benchmarks diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 66eac0df8..126264a6b 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -17,10 +17,11 @@ linters: - gomoddirectives - gosmopolitan - inamedparam - - intrange # disabled while < go1.22 + - intrange - ireturn - lll - musttag + - modernize - nestif - nlreturn - nonamedreturns @@ -29,6 +30,7 @@ linters: - recvcheck - testpackage - thelper + - tagliatelle - tparallel - unparam - varnamelen diff --git a/vendor/github.com/go-openapi/swag/.mockery.yml b/vendor/github.com/go-openapi/swag/.mockery.yml new file mode 100644 index 000000000..8557cb58d --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.mockery.yml @@ -0,0 +1,30 @@ +all: false +dir: '{{.InterfaceDir}}' +filename: mocks_test.go +force-file-write: true +formatter: goimports +include-auto-generated: false +log-level: info +structname: '{{.Mock}}{{.InterfaceName}}' +pkgname: '{{.SrcPackageName}}' +recursive: false +require-template-schema-exists: true +template: matryer +template-schema: '{{.Template}}.schema.json' +packages: + github.com/go-openapi/swag/jsonutils/adapters/ifaces: + config: + dir: jsonutils/adapters/ifaces/mocks + filename: mocks.go + pkgname: 'mocks' + force-file-write: true + 
all: true + github.com/go-openapi/swag/jsonutils/adapters/testintegration: + config: + inpackage: true + dir: jsonutils/adapters/testintegration + force-file-write: true + all: true + interfaces: + EJMarshaler: + EJUnmarshaler:
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md index 1efb7a342..371fd55fd 100644 --- a/vendor/github.com/go-openapi/swag/README.md +++ b/vendor/github.com/go-openapi/swag/README.md @@ -9,91 +9,146 @@ Package `swag` contains a bunch of helper functions for go-openapi and go-swagge You may also use it standalone for your projects. +> **NOTE** > `swag` is one of the foundational building blocks of the go-openapi initiative. -> > Most repositories in `github.com/go-openapi/...` depend on it in some way. -> So does the CLI tool `github.com/go-swagger/go-swagger`, -> and the code generated by this tool. +> And so does our CLI tool `github.com/go-swagger/go-swagger`, +> as well as the code generated by this tool. + +* [Contents](#contents) +* [Dependencies](#dependencies) +* [Release Notes](#release-notes) +* [Licensing](#licensing) +* [Note to contributors](#note-to-contributors) +* [TODOs, suggestions and plans](#todos-suggestions-and-plans) ## Contents -`go-openapi/swag` now exposes a collection of relatively independent modules. +`go-openapi/swag` exposes a collection of relatively independent modules. -Here is what is inside: +Moving forward, no additional feature will be added to the `swag` API directly at the root package level, +which remains there for backward-compatibility purposes. All exported top-level features are now deprecated. -* Module `cmdutils` +Child modules will continue to evolve and some new ones may be added in the future. - * [x] utilities to work with CLIs
+| Module | Content | Main features |
+|---------------|---------|---------------|
+| `cmdutils` | utilities to work with CLIs ||
+| `conv` | type conversion utilities | convert between values and pointers for any types<br>convert from string to builtin types (wraps `strconv`)<br>require `./typeutils` (test dependency)<br> |
+| `fileutils` | file utilities | |
+| `jsonname` | JSON utilities | infer JSON names from `go` properties<br> |
+| `jsonutils` | JSON utilities | fast json concatenation<br>read and write JSON from and to dynamic `go` data structures<br>~require `github.com/mailru/easyjson`~<br> |
+| `loading` | file loading | load from file or http<br>require `./yamlutils`<br> |
+| `mangling` | safe name generation | name mangling for `go`<br> |
+| `netutils` | networking utilities | host, port from address<br> |
+| `stringutils` | `string` utilities | search in slice (with case-insensitive)<br>split/join query parameters as arrays<br> |
+| `typeutils` | `go` types utilities | check the zero value for any type<br>safe check for a nil value<br> |
+| `yamlutils` | YAML utilities | converting YAML to JSON<br>loading YAML into a dynamic YAML document<br>maintaining the original order of keys in YAML objects<br>require `./jsonutils`<br>~require `github.com/mailru/easyjson`~<br>require `go.yaml.in/yaml/v3`<br> |
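Editor's note: to make the `conv` row above concrete, here is a minimal sketch using the generic `Pointer` helper, whose signature (`func Pointer[T any](v T) *T`) appears later in this diff; the `limit` variable is purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag/conv"
)

func main() {
	// conv.Pointer returns a pointer to any value,
	// handy for populating optional (pointer-typed) API fields.
	limit := conv.Pointer(10)

	fmt.Println(*limit) // 10
}
```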
-* Module `conv` +--- - * [x] convert between values and pointers for any types - * [x] convert from string to builtin types (wraps `strconv`) - * [x] require `./typeutils` (test dependency) +## Dependencies + +The root module `github.com/go-openapi/swag` at the repo level maintains a few +dependencies outside of the standard library. -* Module `fileutils` +* YAML utilities depend on `go.yaml.in/yaml/v3` +* JSON utilities depend on their registered adapter module: + * by default, only the standard library is used + * `github.com/mailru/easyjson` is now only a dependency for module + `github.com/go-openapi/swag/jsonutils/adapters/easyjson/json`, + for users willing to import that module. + * integration tests and benchmarks, which use all the dependencies, are published as their own module +* other dependencies are test dependencies drawn from `github.com/stretchr/testify` - * [x] file upload type - * [x] search in path (deprecated) +## Release notes -* Module `jsonname` +### v0.25.4 - * [x] infer JSON names from go properties +**mangling** -* Module `jsonutils` +Bug fix - * [x] fast json concatenation - * [x] read and write JSON from and to dynamic go data structures - * [x] require `github.com/mailru/easyjson` +* [x] mangler may panic with pluralized overlapping initialisms -* Module `loading` +Tests - * [x] load from file or http - * [x] require `./yamlutils` +* [x] introduced fuzz tests -* Module `mangling` +### v0.25.3 - * [x] name mangling for go +**mangling** -* Module `netutils` +Bug fix - * [x] host, port from address +* [x] mangler may panic with pluralized initialisms -* Module `stringutils` +### v0.25.2 - * [x] search in slice (with case-insensitive) - * [x] split/join query parameters as arrays +Minor changes due to internal maintenance that don't affect the behavior of the library. -* Module `typeutils` +* [x] removed indirect test dependencies by switching all tests to `go-openapi/testify`, + a fork of `stretchr/testify` with zero dependencies. +* [x] improvements to CI to collect test reports. +* [x] modernized licensing annotations in source code, using the more compact SPDX annotations + rather than the full license terms. +* [x] simplified JSON & YAML testing a bit by using newly available assertions +* started the journey to an OpenSSF scorecard badge: + * [x] made permissions explicit in CI workflows + * [x] published security policy + * pinned dependencies in github actions + * introduced fuzzing in tests - * [x] check the zero value for any type +### v0.25.1 -* Module `yamlutils` +* fixes a data race that could occur when using the standard library implementation of a JSON ordered map - * [x] converting YAML to JSON - * [x] loading YAML into a dynamic YAML document - * [x] require `./jsonutils` - * [x] require `github.com/mailru/easyjson` - * [x] require `gopkg.in/yaml.v3` +### v0.25.0 --- +**New with this release**: -The root module `github.com/go-openapi/swag` at the repo level maintains a few -dependencies outside of the standard library: +* requires `go1.24`, as iterators are being introduced +* removes the dependency on `mailru/easyjson` by default (#68) + * functionality remains the same, but performance may somewhat degrade for applications + that relied on `easyjson` + * users of the JSON or YAML utilities who want to use `easyjson` as their preferred JSON serializer library + will be able to do so by registering the corresponding JSON adapter at runtime. See below. + * ordered keys in JSON and YAML objects: this feature used to rely solely on `easyjson`.
+ With this release, an implementation relying on the standard `encoding/json` is provided. + * an independent [benchmark](./jsonutils/adapters/testintegration/benchmarks/README.md) to compare the different adapters +* improves the "float is integer" check (`conv.IsFloat64AJSONInteger`) (#59) +* removes the _direct_ dependency on `gopkg.in/yaml.v3` (indirect dependency is still incurred through `stretchr/testify`) (#127) +* exposed `conv.IsNil()` (previously kept private): a safe nil check (accounting for the "non-nil interface with nil value" nonsensical go trick) -* YAML utilities depend on `gopkg.in/yaml.v3` -* JSON utilities `github.com/mailru/easyjson` +**What's coming next?** -This is not necessarily the case for all sub-modules. +Moving forward, we want to: +* provide an implementation of the JSON adapter based on `encoding/json/v2`, for `go1.25` builds. +* provide similar implementations for `goccy/go-json` and `jsoniterator/go`, and perhaps some other + similar libraries that may be interesting too. -## Release notes -### v0.25.0 [draft, unreleased] +**How to explicitly register a dependency at runtime?** -* v0.25.0 will remove the dependency to `mailru/easyjson` by default. -* users of JSON or YAML utility who want to use `easyjson` as their - prefered JSON marshaler will be able to do so by registering it - at runtime. +The following would maintain how the JSON utilities proposed by `swag` used to work, up to `v0.24.1`. + + ```go + import ( + "github.com/go-openapi/swag/jsonutils/adapters" + easyjson "github.com/go-openapi/swag/jsonutils/adapters/easyjson/json" + ) + + func init() { + easyjson.Register(adapters.Registry) + } + ``` + +Subsequent calls to `jsonutils.ReadJSON()` or `jsonutils.WriteJSON()` will switch to `easyjson` +whenever the passed data structures implement `easyjson.Unmarshaler` or `easyjson.Marshaler`, respectively, +or fall back to the standard library. + +For more details, you may also look at our +[integration tests](jsonutils/adapters/testintegration/integration_suite_test.go#29). ### v0.24.0 @@ -121,14 +176,13 @@ With this release, we have largely modernized the API of `swag`: --- -Moving forward, no additional feature will be added to the `swag` API directly. - -However, child modules will continue to evolve or some new ones may be added in the future. +## Licensing +This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE). -#### Note to contributors +## Note to contributors -The mono-repo structure comes with some unavoidable extra pains... +A mono-repo structure comes with some unavoidable extra pains... * Testing @@ -153,7 +207,14 @@ The mono-repo structure comes with some unavoidable extra pains... > We'd like to adopt the rule that modules in this repo would only differ by a patch version > (e.g. `v0.24.5` vs `v0.24.3`), and we'll level all modules whenever a minor version is introduced. > -> A script in `./hack` is provided to tag all modules in one go at the same level in one go. +> A script in `./hack` is provided to tag all modules with the same version in one go. + +* Continuous integration + +> At this moment, all tests in all modules are systematically run over the full test matrix (3 platforms x 2 go releases). +> This generates quite a lot of jobs. +> +> We ought to reduce the number of jobs required to test a PR focused on only a few modules. ## Todos, suggestions and plans @@ -161,11 +222,18 @@ All kinds of contributions are welcome.
A few ideas: -* [ ] Complete the split of dependencies to isolate easyjson from the rest +* [x] Complete the split of dependencies to isolate easyjson from the rest +* [x] Improve CI to reduce needed tests +* [x] Replace dependency to `gopkg.in/yaml.v3` (`yamlutil`) * [ ] Improve mangling utilities (improve readability, support for capitalized words, better word substitution for non-letter symbols...) * [ ] Move back to this common shared pot a few of the technical features introduced by go-swagger independently (e.g. mangle go package names, search package with go modules support, ...) * [ ] Apply a similar mono-repo approach to go-openapi/strfmt which suffer from similar woes: bloated API, imposed dependency to some database driver. - +* [ ] Adapt `go-swagger` (incl. generated code) to the new `swag` API. +* [ ] Factorize some tests, as there is a lot of redundant testing code in `jsonutils` +* [ ] Benchmark & profiling: publish independently the tool built to analyze and chart benchmarks (e.g. similar to `benchvisual`) +* [ ] more thorough testing for nil / null cases +* [ ] CI pipeline to manage releases +* [ ] cleaner mockery generation (doesn't work out of the box for all sub-modules)
diff --git a/vendor/github.com/go-openapi/swag/SECURITY.md b/vendor/github.com/go-openapi/swag/SECURITY.md new file mode 100644 index 000000000..72296a831 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +This policy outlines the commitment and practices of the go-openapi maintainers regarding security. + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| 0.25.x | :white_check_mark: | + +## Reporting a vulnerability + +If you become aware of a security vulnerability that affects the current repository, +please report it privately to the maintainers. + +Please follow the instructions provided by GitHub to +[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability). + +TL;DR: on GitHub, navigate to the project's "Security" tab then click on "Report a vulnerability".
diff --git a/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go b/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go index bc01ec2bb..6c7bbb26f 100644 --- a/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go +++ b/vendor/github.com/go-openapi/swag/cmdutils/cmd_utils.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package cmdutils @@ -20,5 +9,5 @@ package cmdutils type CommandLineOptionsGroup struct { ShortDescription string LongDescription string - Options interface{} + Options any } diff --git a/vendor/github.com/go-openapi/swag/cmdutils/doc.go b/vendor/github.com/go-openapi/swag/cmdutils/doc.go index 63ac1d17e..31f2c3753 100644 --- a/vendor/github.com/go-openapi/swag/cmdutils/doc.go +++ b/vendor/github.com/go-openapi/swag/cmdutils/doc.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package cmdutils brings helpers for CLIs produced by go-openapi package cmdutils diff --git a/vendor/github.com/go-openapi/swag/cmdutils_iface.go b/vendor/github.com/go-openapi/swag/cmdutils_iface.go index 1eaf36f15..bd0c1fc12 100644 --- a/vendor/github.com/go-openapi/swag/cmdutils_iface.go +++ b/vendor/github.com/go-openapi/swag/cmdutils_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag diff --git a/vendor/github.com/go-openapi/swag/conv/convert.go b/vendor/github.com/go-openapi/swag/conv/convert.go index b9b869854..f205c3913 100644 --- a/vendor/github.com/go-openapi/swag/conv/convert.go +++ b/vendor/github.com/go-openapi/swag/conv/convert.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package conv diff --git a/vendor/github.com/go-openapi/swag/conv/convert_types.go b/vendor/github.com/go-openapi/swag/conv/convert_types.go index 423e8663f..cf4c6495e 100644 --- a/vendor/github.com/go-openapi/swag/conv/convert_types.go +++ b/vendor/github.com/go-openapi/swag/conv/convert_types.go @@ -1,20 +1,13 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package conv -// The original version of this file, eons ago, was taken from the aws go sdk +// Unlicensed credits (idea, concept) +// +// The idea to convert values to pointers and the other way around, was inspired, eons ago, by the aws go sdk. +// +// Nowadays, all sensible API sdk's expose a similar functionality. // Pointer returns a pointer to the value passed in. func Pointer[T any](v T) *T { diff --git a/vendor/github.com/go-openapi/swag/conv/doc.go b/vendor/github.com/go-openapi/swag/conv/doc.go index b02711f42..1bd6ead6e 100644 --- a/vendor/github.com/go-openapi/swag/conv/doc.go +++ b/vendor/github.com/go-openapi/swag/conv/doc.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package conv exposes utilities to convert types. // diff --git a/vendor/github.com/go-openapi/swag/conv/format.go b/vendor/github.com/go-openapi/swag/conv/format.go index db7562a4a..5b87b8e14 100644 --- a/vendor/github.com/go-openapi/swag/conv/format.go +++ b/vendor/github.com/go-openapi/swag/conv/format.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package conv diff --git a/vendor/github.com/go-openapi/swag/conv/sizeof.go b/vendor/github.com/go-openapi/swag/conv/sizeof.go index 646f8be9a..494346557 100644 --- a/vendor/github.com/go-openapi/swag/conv/sizeof.go +++ b/vendor/github.com/go-openapi/swag/conv/sizeof.go @@ -1,3 +1,6 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + package conv import "unsafe" diff --git a/vendor/github.com/go-openapi/swag/conv/type_constraints.go b/vendor/github.com/go-openapi/swag/conv/type_constraints.go index 3c6149836..81135e827 100644 --- a/vendor/github.com/go-openapi/swag/conv/type_constraints.go +++ b/vendor/github.com/go-openapi/swag/conv/type_constraints.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package conv diff --git a/vendor/github.com/go-openapi/swag/conv_iface.go b/vendor/github.com/go-openapi/swag/conv_iface.go index 9991acb65..eea7b2e56 100644 --- a/vendor/github.com/go-openapi/swag/conv_iface.go +++ b/vendor/github.com/go-openapi/swag/conv_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go index b5af82982..b54b57478 100644 --- a/vendor/github.com/go-openapi/swag/doc.go +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -1,78 +1,47 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. // // You may also use it standalone for your projects. // -// NOTE: all features that were previously exposed as package-level members (constants, variables, +// NOTE: all features that used to be exposed as package-level members (constants, variables, // functions and types) are now deprecated and are superseded by equivalent features in // more specialized sub-packages. +// Moving forward, no additional feature will be added to the [swag] API directly at the root package level, +// which remains there for backward-compatibility purposes. // -// Here is what is inside: -// -// Module [cmdutils]: -// -// - utilities to work with CLIs -// -// Module [conv]: -// -// - convert between value and pointers for builtin types -// - convert from string to builtin types (wraps strconv) +// Child modules will continue to evolve or some new ones may be added in the future. // -// Module [fileutils]: +// # Modules // -// - file upload type -// - search in path +// - [cmdutils] utilities to work with CLIs // -// Module [jsonname]: +// - [conv] type conversion utilities // -// - json names for go properties +// - [fileutils] file utilities // -// Module [jsonutils]: +// - [jsonname] JSON utilities // -// - fast json concatenation -// - read and write JSON from and to dynamic go data structures +// - [jsonutils] JSON utilities // -// Module [loading]: +// - [loading] file loading // -// - load from file or http +// - [mangling] safe name generation // -// Module [mangling]: +// - [netutils] networking utilities // -// - name mangling to generate clean identifiers +// - [stringutils] `string` utilities // -// Module [netutils]: +// - [typeutils] `go` types utilities // -// - host, port from address +// - [yamlutils] YAML utilities // -// Module [stringutils]: -// -// - find string in list -// - join/split arrays of query parameters -// -// Module [typeutils]: -// -// - check the zero value of any type -// -// Module [yamlutils]: -// -// - converting YAML to JSON -// - loading YAML into a dynamic YAML document +// # Dependencies // // This repo has a few dependencies outside of the standard library: // -// - YAML utilities depend on [gopkg.in/yaml.v3] -// - JSON utilities depend on [github.com/mailru/easyjson] +// - YAML utilities depend on [go.yaml.in/yaml/v3] package swag + +//go:generate mockery diff --git a/vendor/github.com/go-openapi/swag/fileutils/doc.go b/vendor/github.com/go-openapi/swag/fileutils/doc.go index 4b48e7196..859a200d8 100644 --- a/vendor/github.com/go-openapi/swag/fileutils/doc.go +++ b/vendor/github.com/go-openapi/swag/fileutils/doc.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package fileutils exposes utilities to deal with files and paths. // diff --git a/vendor/github.com/go-openapi/swag/fileutils/file.go b/vendor/github.com/go-openapi/swag/fileutils/file.go index b17eaba58..5ad4cfaea 100644 --- a/vendor/github.com/go-openapi/swag/fileutils/file.go +++ b/vendor/github.com/go-openapi/swag/fileutils/file.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package fileutils diff --git a/vendor/github.com/go-openapi/swag/fileutils/path.go b/vendor/github.com/go-openapi/swag/fileutils/path.go index a70ccb418..dd09f690b 100644 --- a/vendor/github.com/go-openapi/swag/fileutils/path.go +++ b/vendor/github.com/go-openapi/swag/fileutils/path.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package fileutils @@ -38,11 +27,17 @@ func FindInSearchPath(searchPath, pkg string) string { } // FindInGoSearchPath finds a package in the $GOPATH:$GOROOT +// +// Deprecated: this function is no longer relevant with modern go. +// It uses [runtime.GOROOT] under the hood, which is deprecated as of go1.24. func FindInGoSearchPath(pkg string) string { return FindInSearchPath(FullGoSearchPath(), pkg) } // FullGoSearchPath gets the search paths for finding packages +// +// Deprecated: this function is no longer relevant with modern go. +// It uses [runtime.GOROOT] under the hood, which is deprecated as of go1.24. func FullGoSearchPath() string { allPaths := os.Getenv(GOPATHKey) if allPaths == "" { diff --git a/vendor/github.com/go-openapi/swag/fileutils_iface.go b/vendor/github.com/go-openapi/swag/fileutils_iface.go index 0c639e8c1..f3e79a0e4 100644 --- a/vendor/github.com/go-openapi/swag/fileutils_iface.go +++ b/vendor/github.com/go-openapi/swag/fileutils_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag diff --git a/vendor/github.com/go-openapi/swag/go.work b/vendor/github.com/go-openapi/swag/go.work new file mode 100644 index 000000000..1e537f074 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.work @@ -0,0 +1,20 @@ +use ( + . + ./cmdutils + ./conv + ./fileutils + ./jsonname + ./jsonutils + ./jsonutils/adapters/easyjson + ./jsonutils/adapters/testintegration + ./jsonutils/adapters/testintegration/benchmarks + ./jsonutils/fixtures_test + ./loading + ./mangling + ./netutils + ./stringutils + ./typeutils + ./yamlutils +) + +go 1.24.0 diff --git a/vendor/github.com/go-openapi/swag/go.work.sum b/vendor/github.com/go-openapi/swag/go.work.sum new file mode 100644 index 000000000..c1308cafa --- /dev/null +++ b/vendor/github.com/go-openapi/swag/go.work.sum @@ -0,0 +1,7 @@ +github.com/go-openapi/testify/v2 v2.0.1/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= diff --git a/vendor/github.com/go-openapi/swag/jsonname/doc.go b/vendor/github.com/go-openapi/swag/jsonname/doc.go index b2e0c80fc..79232eaca 100644 --- a/vendor/github.com/go-openapi/swag/jsonname/doc.go +++ b/vendor/github.com/go-openapi/swag/jsonname/doc.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package jsonname is a provider of json property names from go properties. package jsonname diff --git a/vendor/github.com/go-openapi/swag/jsonname/name_provider.go b/vendor/github.com/go-openapi/swag/jsonname/name_provider.go index e87aac2f7..8eaf1bece 100644 --- a/vendor/github.com/go-openapi/swag/jsonname/name_provider.go +++ b/vendor/github.com/go-openapi/swag/jsonname/name_provider.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package jsonname @@ -90,7 +79,7 @@ func newNameIndex(tpe reflect.Type) nameIndex { } // GetJSONNames gets all the json property names for a type -func (n *NameProvider) GetJSONNames(subject interface{}) []string { +func (n *NameProvider) GetJSONNames(subject any) []string { n.lock.Lock() defer n.lock.Unlock() tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() @@ -107,7 +96,7 @@ func (n *NameProvider) GetJSONNames(subject interface{}) []string { } // GetJSONName gets the json name for a go property name -func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { +func (n *NameProvider) GetJSONName(subject any, name string) (string, bool) { tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() return n.GetJSONNameForType(tpe, name) } @@ -125,7 +114,7 @@ func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string } // GetGoName gets the go name for a json property name -func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { +func (n *NameProvider) GetGoName(subject any, name string) (string, bool) { tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() return n.GetGoNameForType(tpe, name) } diff --git a/vendor/github.com/go-openapi/swag/jsonname_iface.go b/vendor/github.com/go-openapi/swag/jsonname_iface.go index 555369d75..303a007f6 100644 --- a/vendor/github.com/go-openapi/swag/jsonname_iface.go +++ b/vendor/github.com/go-openapi/swag/jsonname_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/README.md b/vendor/github.com/go-openapi/swag/jsonutils/README.md new file mode 100644 index 000000000..d745cdb46 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/README.md @@ -0,0 +1,108 @@ + # jsonutils + +`jsonutils` exposes a few tools to work with JSON: + +- a fast, simple `Concat` to concatenate (not merge) JSON objects and arrays +- `FromDynamicJSON` to convert a data structure into a "dynamic JSON" data structure +- `ReadJSON` and `WriteJSON` behave like `json.Unmarshal` and `json.Marshal`, + with the ability to use another underlying serialization library through an `Adapter` + configured at runtime +- a `JSONMapSlice` structure that may be used to store JSON objects with the order of keys maintained + +## Dynamic JSON + +We call "dynamic JSON" the go data structure that results from unmarshaling JSON like this: + +```go + var value any + jsonBytes := []byte(`{"a": 1, ... }`) + _ = json.Unmarshal(jsonBytes, &value) +``` + +In this configuration, the standard library mappings are as follows: +
+| JSON | go |
+|-----------|------------------|
+| `number` | `float64` |
+| `string` | `string` |
+| `boolean` | `bool` |
+| `null` | `nil` |
+| `object` | `map[string]any` |
+| `array` | `[]any` |
+ +## Map slices + +When using `JSONMapSlice`, the ordering of keys is ensured by replacing +mappings to `map[string]any` with a `JSONMapSlice`, which is an (ordered) +slice of `JSONMapItem`s. + +Notice that a similar feature is available for YAML (see [`yamlutils`](../yamlutils)), +with a `YAMLMapSlice` type based on the `JSONMapSlice`. + +`JSONMapSlice` is similar to an ordered map, but the keys are not retrieved +in constant time. + +Another difference with the above standard mappings is that numbers don't always map +to a `float64`: if the value is a JSON integer, it unmarshals to `int64`. + +See also [some examples](https://pkg.go.dev/github.com/go-openapi/swag/jsonutils#pkg-examples)
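Editor's note: a minimal sketch of the ordered behavior described above, assuming the `JSONMapSlice` type and the `ReadJSON`/`WriteJSON` helpers named in this README (the exact output formatting is the adapter's choice):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag/jsonutils"
)

func main() {
	// Key order in the input document is preserved,
	// unlike with a plain map[string]any.
	doc := []byte(`{"zebra": 1, "alpha": 2, "quux": 3}`)

	var ordered jsonutils.JSONMapSlice
	if err := jsonutils.ReadJSON(doc, &ordered); err != nil {
		panic(err)
	}

	out, err := jsonutils.WriteJSON(ordered)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(out)) // keys come back as zebra, alpha, quux
}
```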
+ +## Adapters + +`ReadJSON`, `WriteJSON` and `FromDynamicJSON` (which combines the other two) +are wrappers on top of `json.Unmarshal` and `json.Marshal`. + +By default, the adapter merely wraps the standard library. + +The adapter may be used to register other JSON serialization libraries, +possibly several ones at the same time. + +If the value passed is identified as an "ordered map" (i.e. implements `ifaces.Ordered` +or `ifaces.SetOrdered`), the adapter favors the "ordered" JSON behavior and tries to +find a registered implementation that supports ordered keys in objects. + +Our standard library implementation supports this. + +As of `v0.25.0`, we support through such an adapter the popular `mailru/easyjson` +library, which kicks in when the passed values support the `easyjson.Unmarshaler` +or `easyjson.Marshaler` interfaces. + +In the future, we plan to add more similar libraries that compete on the go JSON +serializers scene. + +## Registering an adapter + +In package `github.com/go-openapi/swag/jsonutils/adapters`, several adapters are available. + +Each adapter is an independent go module. Hence you'll pull in its dependencies only if you import it. + +At this moment we provide: +* `stdlib`: JSON adapter based on the standard library +* `easyjson`: JSON adapter based on `github.com/mailru/easyjson` + +The adapters provide the basic `Marshal` and `Unmarshal` capabilities, plus an implementation +of the `MapSlice` pattern. + +You may also build your own adapter based on your specific use-case. An adapter is not required to implement +all capabilities. + +Every adapter comes with a `Register` function, possibly with some options, to register the adapter +with a global registry. + +For example, to enable `easyjson` to be used in `ReadJSON` and `WriteJSON`, you would write something like: + +```go + import ( + "github.com/go-openapi/swag/jsonutils/adapters" + easyjson "github.com/go-openapi/swag/jsonutils/adapters/easyjson/json" + ) + + func init() { + easyjson.Register(adapters.Registry) + } +``` + +You may register several adapters. In this case, capability matching is evaluated starting from the most +recently registered adapter (LIFO). + +## [Benchmarks](./adapters/testintegration/benchmarks/README.md)
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go new file mode 100644 index 000000000..76d3898fc --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/doc.go @@ -0,0 +1,8 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package adapters exposes a registry of adapters for multiple +// JSON serialization libraries. +// +// All interfaces are defined in package [ifaces] (see e.g. [ifaces.Adapter]). +package adapters
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go new file mode 100644 index 000000000..1fd43a1fa --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/doc.go @@ -0,0 +1,5 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +// Package ifaces exposes all interfaces to work with adapters. +package ifaces
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go new file mode 100644 index 000000000..7805e5e5e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/ifaces.go @@ -0,0 +1,84 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package ifaces + +import ( + _ "encoding/json" // for documentation purposes + "iter" +) + +// Ordered knows how to iterate over the (key,value) pairs of a JSON object. +type Ordered interface { + OrderedItems() iter.Seq2[string, any] +} + +// SetOrdered knows how to append or update the keys of a JSON object, +// given an iterator over (key,value) pairs. +// +// If the provided iterator is nil then the receiver should be set to nil. +type SetOrdered interface { + SetOrderedItems(iter.Seq2[string, any]) +} + +// OrderedMap represents a JSON object (i.e. like a map[string]any), +// and knows how to serialize and deserialize JSON with the order of keys maintained. +type OrderedMap interface { + Ordered + SetOrdered + + OrderedMarshalJSON() ([]byte, error) + OrderedUnmarshalJSON([]byte) error +} + +// MarshalAdapter behaves like the standard library [json.Marshal]. +type MarshalAdapter interface { + Poolable + + Marshal(any) ([]byte, error) +} + +// OrderedMarshalAdapter behaves like the standard library [json.Marshal], preserving the order of keys in objects. +type OrderedMarshalAdapter interface { + Poolable + + OrderedMarshal(Ordered) ([]byte, error) +} + +// UnmarshalAdapter behaves like the standard library [json.Unmarshal]. +type UnmarshalAdapter interface { + Poolable + + Unmarshal([]byte, any) error +} + +// OrderedUnmarshalAdapter behaves like the standard library [json.Unmarshal], preserving the order of keys in objects. +type OrderedUnmarshalAdapter interface { + Poolable + + OrderedUnmarshal([]byte, SetOrdered) error +} + +// Adapter exposes an interface like the standard [json] library. +type Adapter interface { + MarshalAdapter + UnmarshalAdapter + + OrderedAdapter +} + +// OrderedAdapter exposes interfaces to process JSON and keep the order of object keys. +type OrderedAdapter interface { + OrderedMarshalAdapter + OrderedUnmarshalAdapter + NewOrderedMap(capacity int) OrderedMap +} + +type Poolable interface { + // Self-redeem: for [Adapter]s that are allocated from a pool. + // The [Adapter] must not be used after calling [Redeem]. + Redeem() + + // Reset the state of the [Adapter], if any. + Reset() +}
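Editor's note: per the interfaces above, a type only needs an `OrderedItems` iterator to be treated as ordered. A minimal, self-contained sketch (the `orderedObj` type is hypothetical, for illustration only):

```go
package main

import (
	"fmt"
	"iter"

	"github.com/go-openapi/swag/jsonutils/adapters/ifaces"
)

// pair mimics one key/value entry of an ordered JSON object.
type pair struct {
	k string
	v any
}

// orderedObj is a toy ordered object; it satisfies ifaces.Ordered.
type orderedObj []pair

var _ ifaces.Ordered = orderedObj{}

// OrderedItems yields the entries in insertion order.
func (o orderedObj) OrderedItems() iter.Seq2[string, any] {
	return func(yield func(string, any) bool) {
		for _, p := range o {
			if !yield(p.k, p.v) {
				return
			}
		}
	}
}

func main() {
	obj := orderedObj{{"zebra", 1}, {"alpha", 2}}
	for k, v := range obj.OrderedItems() {
		fmt.Println(k, v) // zebra 1, then alpha 2: insertion order kept
	}
}
```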
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go new file mode 100644 index 000000000..2d6c69f4e --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/ifaces/registry_iface.go @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package ifaces + +import ( + "strings" +) + +// Capability indicates what a JSON adapter is capable of. +type Capability uint8 + +const ( + CapabilityMarshalJSON Capability = 1 << iota + CapabilityUnmarshalJSON + CapabilityOrderedMarshalJSON + CapabilityOrderedUnmarshalJSON + CapabilityOrderedMap +) + +func (c Capability) String() string { + switch c { + case CapabilityMarshalJSON: + return "MarshalJSON" + case CapabilityUnmarshalJSON: + return "UnmarshalJSON" + case CapabilityOrderedMarshalJSON: + return "OrderedMarshalJSON" + case CapabilityOrderedUnmarshalJSON: + return "OrderedUnmarshalJSON" + case CapabilityOrderedMap: + return "OrderedMap" + default: + return "" + } +} + +// Capabilities holds several unitary capability flags. +type Capabilities uint8 + +// Has some capability flag enabled. +func (c Capabilities) Has(capability Capability) bool { + return Capability(c)&capability > 0 +} + +func (c Capabilities) String() string { + var w strings.Builder + + first := true + for _, capability := range []Capability{ + CapabilityMarshalJSON, + CapabilityUnmarshalJSON, + CapabilityOrderedMarshalJSON, + CapabilityOrderedUnmarshalJSON, + CapabilityOrderedMap, + } { + if c.Has(capability) { + if !first { + w.WriteByte('|') + } else { + first = false + } + w.WriteString(capability.String()) + } + } + + return w.String() +} + +const ( + AllCapabilities Capabilities = Capabilities(uint8(CapabilityMarshalJSON) | + uint8(CapabilityUnmarshalJSON) | + uint8(CapabilityOrderedMarshalJSON) | + uint8(CapabilityOrderedUnmarshalJSON) | + uint8(CapabilityOrderedMap)) + + AllUnorderedCapabilities Capabilities = Capabilities(uint8(CapabilityMarshalJSON) | uint8(CapabilityUnmarshalJSON)) +) + +// RegistryEntry describes how any given adapter registers its capabilities with the [Registrar]. +type RegistryEntry struct { + Who string + What Capabilities + Constructor func() Adapter + Support func(what Capability, value any) bool +} + +// Registrar is a type that knows how to keep track of registration calls from adapters. +type Registrar interface { + RegisterFor(RegistryEntry) +}
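Editor's note: a small sketch of how these bit flags combine, mirroring the `AllCapabilities` pattern above (the `main` function is hypothetical, for illustration only):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag/jsonutils/adapters/ifaces"
)

func main() {
	// Combine unitary Capability flags into a Capabilities bit set,
	// the way registry entries declare what an adapter can do.
	caps := ifaces.Capabilities(
		uint8(ifaces.CapabilityMarshalJSON) | uint8(ifaces.CapabilityOrderedMap),
	)

	fmt.Println(caps.Has(ifaces.CapabilityMarshalJSON))   // true
	fmt.Println(caps.Has(ifaces.CapabilityUnmarshalJSON)) // false
	fmt.Println(caps)                                     // MarshalJSON|OrderedMap
}
```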
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go new file mode 100644 index 000000000..3062acaff --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/registry.go @@ -0,0 +1,229 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package adapters + +import ( + "fmt" + "reflect" + "slices" + "sync" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" + stdlib "github.com/go-openapi/swag/jsonutils/adapters/stdlib/json" +) + +// Registry holds the global registry for registered adapters. +var Registry = NewRegistrar() + +var ( + defaultRegistered = stdlib.Register + + _ ifaces.Registrar = &Registrar{} +) + +type registryError string + +func (e registryError) Error() string { + return string(e) +} + +// ErrRegistry indicates an error returned by the [Registrar]. +var ErrRegistry registryError = "JSON adapters registry error" + +type registry []*ifaces.RegistryEntry + +// Registrar holds registered [ifaces.Adapter] implementations for different serialization capabilities. +// +// Internally, it maintains a cache for data types that favor a given adapter. +type Registrar struct { + marshalerRegistry registry + unmarshalerRegistry registry + orderedMarshalerRegistry registry + orderedUnmarshalerRegistry registry + orderedMapRegistry registry + + gmx sync.RWMutex + + // cache indexed by value type, so we don't have to look up the registry again + marshalerCache map[reflect.Type]*ifaces.RegistryEntry + unmarshalerCache map[reflect.Type]*ifaces.RegistryEntry + orderedMarshalerCache map[reflect.Type]*ifaces.RegistryEntry + orderedUnmarshalerCache map[reflect.Type]*ifaces.RegistryEntry + orderedMapCache map[reflect.Type]*ifaces.RegistryEntry +} + +func NewRegistrar() *Registrar { + r := &Registrar{} + + r.marshalerRegistry = make(registry, 0, 1) + r.unmarshalerRegistry = make(registry, 0, 1) + r.orderedMarshalerRegistry = make(registry, 0, 1) + r.orderedUnmarshalerRegistry = make(registry, 0, 1) + r.orderedMapRegistry = make(registry, 0, 1) + + r.marshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.unmarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.orderedMarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.orderedUnmarshalerCache = make(map[reflect.Type]*ifaces.RegistryEntry) + r.orderedMapCache = make(map[reflect.Type]*ifaces.RegistryEntry) + + defaultRegistered(r) + + return r +} + +// ClearCache resets the internal type cache. +func (r *Registrar) ClearCache() { + r.gmx.Lock() + r.clearCache() + r.gmx.Unlock() +} + +// Reset the [Registrar] to its defaults. +func (r *Registrar) Reset() { + r.gmx.Lock() + r.clearCache() + r.marshalerRegistry = r.marshalerRegistry[:0] + r.unmarshalerRegistry = r.unmarshalerRegistry[:0] + r.orderedMarshalerRegistry = r.orderedMarshalerRegistry[:0] + r.orderedUnmarshalerRegistry = r.orderedUnmarshalerRegistry[:0] + r.orderedMapRegistry = r.orderedMapRegistry[:0] + r.gmx.Unlock() + + defaultRegistered(r) +} + +// RegisterFor registers an adapter for some JSON capabilities.
+func (r *Registrar) RegisterFor(entry ifaces.RegistryEntry) {
+	r.gmx.Lock()
+	if entry.What.Has(ifaces.CapabilityMarshalJSON) {
+		e := entry
+		e.What &= ifaces.Capabilities(ifaces.CapabilityMarshalJSON)
+		r.marshalerRegistry = slices.Insert(r.marshalerRegistry, 0, &e)
+	}
+	if entry.What.Has(ifaces.CapabilityUnmarshalJSON) {
+		e := entry
+		e.What &= ifaces.Capabilities(ifaces.CapabilityUnmarshalJSON)
+		r.unmarshalerRegistry = slices.Insert(r.unmarshalerRegistry, 0, &e)
+	}
+	if entry.What.Has(ifaces.CapabilityOrderedMarshalJSON) {
+		e := entry
+		e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedMarshalJSON)
+		r.orderedMarshalerRegistry = slices.Insert(r.orderedMarshalerRegistry, 0, &e)
+	}
+	if entry.What.Has(ifaces.CapabilityOrderedUnmarshalJSON) {
+		e := entry
+		e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedUnmarshalJSON)
+		r.orderedUnmarshalerRegistry = slices.Insert(r.orderedUnmarshalerRegistry, 0, &e)
+	}
+	if entry.What.Has(ifaces.CapabilityOrderedMap) {
+		e := entry
+		e.What &= ifaces.Capabilities(ifaces.CapabilityOrderedMap)
+		r.orderedMapRegistry = slices.Insert(r.orderedMapRegistry, 0, &e)
+	}
+	r.gmx.Unlock()
+}
+
+// AdapterFor returns an [ifaces.Adapter] that supports this capability for this type of value.
+//
+// The [ifaces.Adapter] may be redeemed to its pool using its Redeem() method, for adapters that support global
+// pooling. When this is not the case, the redeem function is just a no-operation.
+func (r *Registrar) AdapterFor(capability ifaces.Capability, value any) ifaces.Adapter {
+	entry := r.findFirstFor(capability, value)
+	if entry == nil {
+		return nil
+	}
+
+	return entry.Constructor()
+}
+
+func (r *Registrar) clearCache() {
+	clear(r.marshalerCache)
+	clear(r.unmarshalerCache)
+	clear(r.orderedMarshalerCache)
+	clear(r.orderedUnmarshalerCache)
+	clear(r.orderedMapCache)
+}
+
+func (r *Registrar) findFirstFor(capability ifaces.Capability, value any) *ifaces.RegistryEntry {
+	switch capability {
+	case ifaces.CapabilityMarshalJSON:
+		return r.findFirstInRegistryFor(r.marshalerRegistry, r.marshalerCache, capability, value)
+	case ifaces.CapabilityUnmarshalJSON:
+		return r.findFirstInRegistryFor(r.unmarshalerRegistry, r.unmarshalerCache, capability, value)
+	case ifaces.CapabilityOrderedMarshalJSON:
+		return r.findFirstInRegistryFor(r.orderedMarshalerRegistry, r.orderedMarshalerCache, capability, value)
+	case ifaces.CapabilityOrderedUnmarshalJSON:
+		return r.findFirstInRegistryFor(r.orderedUnmarshalerRegistry, r.orderedUnmarshalerCache, capability, value)
+	case ifaces.CapabilityOrderedMap:
+		return r.findFirstInRegistryFor(r.orderedMapRegistry, r.orderedMapCache, capability, value)
+	default:
+		panic(fmt.Errorf("unsupported capability %d: %w", capability, ErrRegistry))
+	}
+}
+
+func (r *Registrar) findFirstInRegistryFor(reg registry, cache map[reflect.Type]*ifaces.RegistryEntry, capability ifaces.Capability, value any) *ifaces.RegistryEntry {
+	r.gmx.RLock()
+	if len(reg) > 1 {
+		if entry, ok := cache[reflect.TypeOf(value)]; ok {
+			// cache hit
+			r.gmx.RUnlock()
+			return entry
+		}
+	}
+
+	for _, entry := range reg {
+		if !entry.Support(capability, value) {
+			continue
+		}
+
+		r.gmx.RUnlock()
+
+		// update the internal cache
+		r.gmx.Lock()
+		cache[reflect.TypeOf(value)] = entry
+		r.gmx.Unlock()
+
+		return entry
+	}
+
+	// no adapter found
+	r.gmx.RUnlock()
+
+	return nil
+}
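+
+// A lookup sketch (illustrative only; it assumes the default stdlib adapter
+// is registered, so a match is found):
+//
+//	value := map[string]any{"a": 1}
+//	if a := Registry.AdapterFor(ifaces.CapabilityMarshalJSON, value); a != nil {
+//		defer a.Redeem() // pooled adapters return to their pool
+//		b, err := a.Marshal(value) // b is []byte(`{"a":1}`) with the stdlib adapter
+//		_, _ = b, err
+//	}
+
+// MarshalAdapterFor returns the first adapter that knows how to Marshal this type of value.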
+func MarshalAdapterFor(value any) ifaces.MarshalAdapter {
+	return Registry.AdapterFor(ifaces.CapabilityMarshalJSON, value)
+}
+
+// OrderedMarshalAdapterFor returns the first adapter that knows how to OrderedMarshal this type of value.
+func OrderedMarshalAdapterFor(value ifaces.Ordered) ifaces.OrderedMarshalAdapter {
+	return Registry.AdapterFor(ifaces.CapabilityOrderedMarshalJSON, value)
+}
+
+// UnmarshalAdapterFor returns the first adapter that knows how to Unmarshal this type of value.
+func UnmarshalAdapterFor(value any) ifaces.UnmarshalAdapter {
+	return Registry.AdapterFor(ifaces.CapabilityUnmarshalJSON, value)
+}
+
+// OrderedUnmarshalAdapterFor provides the first adapter that knows how to OrderedUnmarshal this type of value.
+func OrderedUnmarshalAdapterFor(value ifaces.SetOrdered) ifaces.OrderedUnmarshalAdapter {
+	return Registry.AdapterFor(ifaces.CapabilityOrderedUnmarshalJSON, value)
+}
+
+// NewOrderedMap returns the "ordered map" implementation provided by the registry.
+func NewOrderedMap(capacity int) ifaces.OrderedMap {
+	var v any
+	adapter := Registry.AdapterFor(ifaces.CapabilityOrderedUnmarshalJSON, v)
+	if adapter == nil {
+		return nil
+	}
+
+	defer adapter.Redeem()
+	return adapter.NewOrderedMap(capacity)
+}
+
+func noopRedeemer() {}
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go
new file mode 100644
index 000000000..0213ff5c2
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/adapter.go
@@ -0,0 +1,115 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package json
+
+import (
+	stdjson "encoding/json"
+
+	"github.com/go-openapi/swag/jsonutils/adapters/ifaces"
+	"github.com/go-openapi/swag/typeutils"
+)
+
+const sensibleBufferSize = 8192
+
+type jsonError string
+
+func (e jsonError) Error() string {
+	return string(e)
+}
+
+// ErrStdlib indicates that an error comes from the stdlib JSON adapter.
+var ErrStdlib jsonError = "error from the JSON adapter stdlib"
+
+var _ ifaces.Adapter = &Adapter{}
+
+// Adapter implements [ifaces.Adapter] using the standard library [encoding/json].
+type Adapter struct {
+}
+
+// NewAdapter yields an [ifaces.Adapter] using the standard library.
+func NewAdapter() *Adapter {
+	return &Adapter{}
+}
+
+func (a *Adapter) Marshal(value any) ([]byte, error) {
+	return stdjson.Marshal(value)
+}
+
+func (a *Adapter) Unmarshal(data []byte, value any) error {
+	return stdjson.Unmarshal(data, value)
+}
+
+func (a *Adapter) OrderedMarshal(value ifaces.Ordered) ([]byte, error) {
+	w := poolOfWriters.Borrow()
+	defer func() {
+		poolOfWriters.Redeem(w)
+	}()
+
+	if typeutils.IsNil(value) {
+		w.RawString("null")
+
+		return w.BuildBytes()
+	}
+
+	w.RawByte('{')
+	first := true
+	for k, v := range value.OrderedItems() {
+		if first {
+			first = false
+		} else {
+			w.RawByte(',')
+		}
+
+		w.String(k)
+		w.RawByte(':')
+
+		switch val := v.(type) {
+		case ifaces.Ordered:
+			// nested ordered values are marshaled recursively, so their key order is kept too
+			w.Raw(a.OrderedMarshal(val))
+		default:
+			w.Raw(stdjson.Marshal(v))
+		}
+	}
+
+	w.RawByte('}')
+
+	return w.BuildBytes()
+}
+
+func (a *Adapter) OrderedUnmarshal(data []byte, value ifaces.SetOrdered) error {
+	var m MapSlice
+	if err := m.OrderedUnmarshalJSON(data); err != nil {
+		return err
+	}
+
+	if typeutils.IsNil(m) {
+		// force input value to nil
+		value.SetOrderedItems(nil)
+
+		return nil
+	}
+
+	value.SetOrderedItems(m.OrderedItems())
+
+	return nil
+}
+
+func (a *Adapter) NewOrderedMap(capacity int) ifaces.OrderedMap {
+	m := make(MapSlice, 0, capacity)
+
+	return &m
+}
+
+// Redeem the [Adapter] when it comes from a pool.
+//
+// The adapter becomes immediately unusable once redeemed.
+func (a *Adapter) Redeem() {
+	if a == nil {
+		return
+	}
+
+	RedeemAdapter(a)
+}
+
+func (a *Adapter) Reset() {
+}
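+
+// A minimal usage sketch of this adapter on its own (assuming the pooled
+// helpers from this package; Redeem returns the instance to the pool):
+//
+//	a := BorrowAdapter()
+//	defer a.Redeem()
+//
+//	m := MapSlice{{Key: "b", Value: 1}, {Key: "a", Value: 2}}
+//	b, err := a.OrderedMarshal(m)
+//	// b == []byte(`{"b":1,"a":2}`), err == nil: the order of keys is preserved
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go
new file mode 100644
index 000000000..5ea1b4404
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/doc.go
@@ -0,0 +1,5 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+// Package json implements an [ifaces.Adapter] using the standard library.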
+package json
diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go
new file mode 100644
index 000000000..b5aa1c797
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/lexer.go
@@ -0,0 +1,320 @@
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
+
+package json
+
+import (
+	stdjson "encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+
+	"github.com/go-openapi/swag/conv"
+)
+
+type token struct {
+	stdjson.Token
+}
+
+func (t token) String() string {
+	if t == invalidToken {
+		return "invalid token"
+	}
+	if t == eofToken {
+		return "EOF"
+	}
+
+	return fmt.Sprintf("%v", t.Token)
+}
+
+func (t token) Kind() tokenKind {
+	switch t.Token.(type) {
+	case nil:
+		return tokenNull
+	case stdjson.Delim:
+		return tokenDelim
+	case bool:
+		return tokenBool
+	case float64:
+		return tokenFloat
+	case stdjson.Number:
+		return tokenNumber
+	case string:
+		return tokenString
+	default:
+		return tokenUndef
+	}
+}
+
+func (t token) Delim() byte {
+	r, ok := t.Token.(stdjson.Delim)
+	if !ok {
+		return 0
+	}
+
+	return byte(r)
+}
+
+type tokenKind uint8
+
+const (
+	tokenUndef tokenKind = iota
+	tokenString
+	tokenNumber
+	tokenFloat
+	tokenBool
+	tokenNull
+	tokenDelim
+)
+
+var (
+	invalidToken = token{
+		Token: stdjson.Token(struct{}{}),
+	}
+
+	eofToken = token{
+		Token: stdjson.Token(&struct{}{}),
+	}
+
+	undefToken = token{
+		Token: stdjson.Token(uint8(0)),
+	}
+)
+
+// jlexer apes easyjson's jlexer, but uses the standard library decoder under the hood.
+type jlexer struct {
+	buf *bytesReader
+	dec *stdjson.Decoder
+	err error
+	// current token
+	next token
+	// started bool
+}
+
+type bytesReader struct {
+	buf    []byte
+	offset int
+}
+
+func (b *bytesReader) Reset() {
+	b.buf = nil
+	b.offset = 0
+}
+
+func (b *bytesReader) Read(p []byte) (int, error) {
+	if b.offset >= len(b.buf) {
+		return 0, io.EOF
+	}
+
+	n := len(p)
+	buf := b.buf[b.offset:]
+	m := len(buf)
+
+	if n >= m {
+		copy(p, buf)
+		b.offset += m
+
+		return m, nil
+	}
+
+	copy(p, buf[:n])
+	b.offset += n
+
+	return n, nil
+}
+
+var _ io.Reader = &bytesReader{}
+
+func newLexer(data []byte) *jlexer {
+	l := &jlexer{
+		// current: undefToken,
+		next: undefToken,
+	}
+	l.buf = &bytesReader{
+		buf: data,
+	}
+	l.dec = stdjson.NewDecoder(l.buf) // unfortunately, cannot pool this
+
+	return l
+}
+
+func (l *jlexer) Reset() {
+	l.err = nil
+	l.next = undefToken
+	// leave l.dec and l.buf alone, since they are replaced at every Borrow
+}
+
+func (l *jlexer) Error() error {
+	return l.err
+}
+
+func (l *jlexer) SetErr(err error) {
+	l.err = err
+}
+
+func (l *jlexer) Ok() bool {
+	return l.err == nil
+}
+
+// NextToken consumes a token
+func (l *jlexer) NextToken() token {
+	if !l.Ok() {
+		return invalidToken
+	}
+
+	if l.next != undefToken {
+		next := l.next
+		l.next = undefToken
+
+		return next
+	}
+
+	return l.fetchToken()
+}
+
+// PeekToken returns the next token without consuming it
+func (l *jlexer) PeekToken() token {
+	if l.next == undefToken {
+		l.next = l.fetchToken()
+	}
+
+	return l.next
+}
+
+func (l *jlexer) Skip() {
+	_ = l.NextToken()
+}
+
+func (l *jlexer) IsDelim(c byte) bool {
+	if !l.Ok() {
+		return false
+	}
+
+	next := l.PeekToken()
+	if next.Kind() != tokenDelim {
+		return false
+	}
+
+	if next.Delim() != c {
+		return false
+	}
+
+	return true
+}
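+
+// A peek-then-consume sketch (hypothetical input, assuming this lexer as
+// defined above; commas and colons are elided by the underlying decoder):
+//
+//	l := newLexer([]byte(`{"a":1}`))
+//	_ = l.IsDelim('{') // true: peeks, does not consume
+//	l.Delim('{')       // consumes '{'
+//	_ = l.String()     // "a" (the key)
+//	_ = l.Number()     // int64(1)
+//	l.Delim('}')
+
+func (l *jlexer) IsNull() bool {
+	if !l.Ok() {
+		return false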
} + + next := l.PeekToken() + + return next.Kind() == tokenNull +} + +func (l *jlexer) Delim(c byte) { + if !l.Ok() { + return + } + + tok := l.NextToken() + if tok.Kind() != tokenDelim { + l.err = fmt.Errorf("expected a delimiter token but got '%v': %w", tok, ErrStdlib) + + return + } + + if tok.Delim() != c { + l.err = fmt.Errorf("expected delimiter '%q' but got '%q': %w", c, tok.Delim(), ErrStdlib) + } +} + +func (l *jlexer) Null() { + if !l.Ok() { + return + } + + tok := l.NextToken() + if tok.Kind() != tokenNull { + l.err = fmt.Errorf("expected a null token but got '%v': %w", tok, ErrStdlib) + } +} + +func (l *jlexer) Number() any { + if !l.Ok() { + return 0 + } + + tok := l.NextToken() + + switch tok.Kind() { //nolint:exhaustive + case tokenNumber: + n := tok.Token.(stdjson.Number).String() + f, _ := strconv.ParseFloat(n, 64) + if conv.IsFloat64AJSONInteger(f) { + return int64(math.Trunc(f)) + } + + return f + + case tokenFloat: + f := tok.Token.(float64) + if conv.IsFloat64AJSONInteger(f) { + return int64(math.Trunc(f)) + } + + return f + + default: + l.err = fmt.Errorf("expected a number token but got '%v': %w", tok, ErrStdlib) + + return 0 + } +} + +func (l *jlexer) Bool() bool { + if !l.Ok() { + return false + } + + tok := l.NextToken() + if tok.Kind() != tokenBool { + l.err = fmt.Errorf("expected a bool token but got '%v': %w", tok, ErrStdlib) + + return false + } + + return tok.Token.(bool) +} + +func (l *jlexer) String() string { + if !l.Ok() { + return "" + } + + tok := l.NextToken() + if tok.Kind() != tokenString { + l.err = fmt.Errorf("expected a string token but got '%v': %w", tok, ErrStdlib) + + return "" + } + + return tok.Token.(string) +} + +// Commas and colons are elided. +func (l *jlexer) fetchToken() token { + jtok, err := l.dec.Token() + if err != nil { + if errors.Is(err, io.EOF) { + return eofToken + } + + l.err = errors.Join(err, ErrStdlib) + return invalidToken + } + + return token{Token: jtok} +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go new file mode 100644 index 000000000..54deef406 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/ordered_map.go @@ -0,0 +1,266 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + stdjson "encoding/json" + "fmt" + "iter" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +var _ ifaces.OrderedMap = &MapSlice{} + +// MapSlice represents a JSON object, with the order of keys maintained. +type MapSlice []MapItem + +func (s MapSlice) OrderedItems() iter.Seq2[string, any] { + return func(yield func(string, any) bool) { + for _, item := range s { + if !yield(item.Key, item.Value) { + return + } + } + } +} + +func (s *MapSlice) SetOrderedItems(items iter.Seq2[string, any]) { + if items == nil { + *s = nil + + return + } + + m := *s + if len(m) > 0 { + // update mode + idx := make(map[string]int, len(m)) + + for i, item := range m { + idx[item.Key] = i + } + + for k, v := range items { + idx, ok := idx[k] + if ok { + m[idx].Value = v + + continue + } + m = append(m, MapItem{Key: k, Value: v}) + } + + *s = m + + return + } + + for k, v := range items { + m = append(m, MapItem{Key: k, Value: v}) + } + + *s = m +} + +// MarshalJSON renders a [MapSlice] as JSON bytes, preserving the order of keys. 
+func (s MapSlice) MarshalJSON() ([]byte, error) {
+	return s.OrderedMarshalJSON()
+}
+
+func (s MapSlice) OrderedMarshalJSON() ([]byte, error) {
+	w := poolOfWriters.Borrow()
+	defer func() {
+		poolOfWriters.Redeem(w)
+	}()
+
+	s.marshalObject(w)
+
+	return w.BuildBytes() // this clones data, so it's okay to redeem the writer and its buffer
+}
+
+// UnmarshalJSON builds a [MapSlice] from JSON bytes, preserving the order of keys.
+//
+// Inner objects are unmarshaled as [MapSlice] slices and not map[string]any.
+func (s *MapSlice) UnmarshalJSON(data []byte) error {
+	return s.OrderedUnmarshalJSON(data)
+}
+
+func (s *MapSlice) OrderedUnmarshalJSON(data []byte) error {
+	l := poolOfLexers.Borrow(data)
+	defer func() {
+		poolOfLexers.Redeem(l)
+	}()
+
+	s.unmarshalObject(l)
+
+	return l.Error()
+}
+
+func (s MapSlice) marshalObject(w *jwriter) {
+	if s == nil {
+		w.RawString("null")
+
+		return
+	}
+
+	w.RawByte('{')
+
+	if len(s) == 0 {
+		w.RawByte('}')
+
+		return
+	}
+
+	s[0].marshalJSON(w)
+
+	for i := 1; i < len(s); i++ {
+		w.RawByte(',')
+		s[i].marshalJSON(w)
+	}
+
+	w.RawByte('}')
+}
+
+func (s *MapSlice) unmarshalObject(in *jlexer) {
+	if in.IsNull() {
+		in.Skip()
+
+		return
+	}
+
+	in.Delim('{') // consume token
+	if !in.Ok() {
+		return
+	}
+
+	result := make(MapSlice, 0)
+
+	for in.Ok() && !in.IsDelim('}') {
+		var mi MapItem
+
+		mi.unmarshalKeyValue(in)
+		result = append(result, mi)
+	}
+
+	in.Delim('}')
+
+	if !in.Ok() {
+		return
+	}
+
+	*s = result
+}
+
+// MapItem represents the value of a key in a JSON object held by [MapSlice].
+//
+// Notice that [MapItem] should not be marshaled to or unmarshaled from JSON directly;
+// use this type as part of a [MapSlice] when dealing with JSON bytes.
+type MapItem struct {
+	Key   string
+	Value any
+}
+
+func (s MapItem) marshalJSON(w *jwriter) {
+	w.String(s.Key)
+	w.RawByte(':')
+	w.Raw(stdjson.Marshal(s.Value))
+}
+
+func (s *MapItem) unmarshalKeyValue(in *jlexer) {
+	key := in.String()         // consume string
+	value := s.asInterface(in) // consume any value, including termination tokens '}' or ']'
+
+	if !in.Ok() {
+		return
+	}
+
+	s.Key = key
+	s.Value = value
+}
+
+func (s *MapItem) unmarshalArray(in *jlexer) []any {
+	if in.IsNull() {
+		in.Skip()
+
+		return nil
+	}
+
+	in.Delim('[') // consume token
+	if !in.Ok() {
+		return nil
+	}
+
+	ret := make([]any, 0)
+
+	for in.Ok() && !in.IsDelim(']') {
+		ret = append(ret, s.asInterface(in))
+	}
+
+	in.Delim(']')
+	if !in.Ok() {
+		return nil
+	}
+
+	return ret
+}
+
+// asInterface is very much like [jlexer.Lexer.Interface], but unmarshals an object
+// into a [MapSlice], not a map[string]any.
+//
+// Parsing errors are forced on the lexer with SetErr whenever an unexpected
+// token or delimiter is met.
+func (s *MapItem) asInterface(in *jlexer) any { + if !in.Ok() { + return nil + } + + tok := in.PeekToken() // look-ahead what the next token looks like + kind := tok.Kind() + + switch kind { + case tokenString: + return in.String() // consume string + + case tokenNumber, tokenFloat: + return in.Number() + + case tokenBool: + return in.Bool() + + case tokenNull: + in.Null() + + return nil + + case tokenDelim: + switch tok.Delim() { + case '{': // not consumed yet + ret := make(MapSlice, 0) + ret.unmarshalObject(in) // consumes the terminating '}' + + if in.Ok() { + return ret + } + + // lexer is in an error state: will exhaust + return nil + + case '[': // not consumed yet + return s.unmarshalArray(in) // consumes the terminating ']' + default: + in.SetErr(fmt.Errorf("unexpected delimiter: %v: %w", tok, ErrStdlib)) // force error + return nil + } + + case tokenUndef: + fallthrough + default: + if in.Ok() { + in.SetErr(fmt.Errorf("unexpected token: %v: %w", tok, ErrStdlib)) // force error + } + + return nil + } +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go new file mode 100644 index 000000000..709b97c30 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/pool.go @@ -0,0 +1,143 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + "encoding/json" + "sync" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +type adaptersPool struct { + sync.Pool +} + +func (p *adaptersPool) Borrow() *Adapter { + return p.Get().(*Adapter) +} + +func (p *adaptersPool) BorrowIface() ifaces.Adapter { + return p.Get().(*Adapter) +} + +func (p *adaptersPool) Redeem(a *Adapter) { + p.Put(a) +} + +type writersPool struct { + sync.Pool +} + +func (p *writersPool) Borrow() *jwriter { + ptr := p.Get() + + jw := ptr.(*jwriter) + jw.Reset() + + return jw +} + +func (p *writersPool) Redeem(w *jwriter) { + p.Put(w) +} + +type lexersPool struct { + sync.Pool +} + +func (p *lexersPool) Borrow(data []byte) *jlexer { + ptr := p.Get() + + l := ptr.(*jlexer) + l.buf = poolOfReaders.Borrow(data) + l.dec = json.NewDecoder(l.buf) // cannot pool, not exposed by the encoding/json API + l.Reset() + + return l +} + +func (p *lexersPool) Redeem(l *jlexer) { + l.dec = nil + discard := l.buf + l.buf = nil + poolOfReaders.Redeem(discard) + p.Put(l) +} + +type readersPool struct { + sync.Pool +} + +func (p *readersPool) Borrow(data []byte) *bytesReader { + ptr := p.Get() + + b := ptr.(*bytesReader) + b.Reset() + b.buf = data + + return b +} + +func (p *readersPool) Redeem(b *bytesReader) { + p.Put(b) +} + +var ( + poolOfAdapters = &adaptersPool{ + Pool: sync.Pool{ + New: func() any { + return NewAdapter() + }, + }, + } + + poolOfWriters = &writersPool{ + Pool: sync.Pool{ + New: func() any { + return newJWriter() + }, + }, + } + + poolOfLexers = &lexersPool{ + Pool: sync.Pool{ + New: func() any { + return newLexer(nil) + }, + }, + } + + poolOfReaders = &readersPool{ + Pool: sync.Pool{ + New: func() any { + return &bytesReader{} + }, + }, + } +) + +// BorrowAdapter borrows an [Adapter] from the pool, recycling already allocated instances. +func BorrowAdapter() *Adapter { + return poolOfAdapters.Borrow() +} + +// BorrowAdapterIface borrows a stdlib [Adapter] and converts it directly +// to [ifaces.Adapter]. 
This is useful to avoid further allocations when +// translating the concrete type into an interface. +func BorrowAdapterIface() ifaces.Adapter { + return poolOfAdapters.BorrowIface() +} + +// RedeemAdapter redeems an [Adapter] to the pool, so it may be recycled. +func RedeemAdapter(a *Adapter) { + poolOfAdapters.Redeem(a) +} + +func RedeemAdapterIface(a ifaces.Adapter) { + concrete, ok := a.(*Adapter) + if ok { + poolOfAdapters.Redeem(concrete) + } +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go new file mode 100644 index 000000000..fc8818694 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/register.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + "fmt" + "reflect" + + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" +) + +func Register(dispatcher ifaces.Registrar) { + t := reflect.TypeOf(Adapter{}) + dispatcher.RegisterFor( + ifaces.RegistryEntry{ + Who: fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()), + What: ifaces.AllCapabilities, + Constructor: BorrowAdapterIface, + Support: support, + }) +} + +func support(_ ifaces.Capability, _ any) bool { + return true +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go new file mode 100644 index 000000000..dc2325c1a --- /dev/null +++ b/vendor/github.com/go-openapi/swag/jsonutils/adapters/stdlib/json/writer.go @@ -0,0 +1,75 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + +package json + +import ( + "bytes" + "encoding/json" + "strings" +) + +type jwriter struct { + buf *bytes.Buffer + err error +} + +func newJWriter() *jwriter { + buf := make([]byte, 0, sensibleBufferSize) + + return &jwriter{buf: bytes.NewBuffer(buf)} +} + +func (w *jwriter) Reset() { + w.buf.Reset() + w.err = nil +} + +func (w *jwriter) RawString(s string) { + if w.err != nil { + return + } + w.buf.WriteString(s) +} + +func (w *jwriter) Raw(b []byte, err error) { + if w.err != nil { + return + } + if err != nil { + w.err = err + return + } + + _, _ = w.buf.Write(b) +} + +func (w *jwriter) RawByte(c byte) { + if w.err != nil { + return + } + w.buf.WriteByte(c) +} + +var quoteReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) + +func (w *jwriter) String(s string) { + if w.err != nil { + return + } + // escape quotes and \ + s = quoteReplacer.Replace(s) + + _ = w.buf.WriteByte('"') + json.HTMLEscape(w.buf, []byte(s)) + _ = w.buf.WriteByte('"') +} + +// BuildBytes returns a clone of the internal buffer. +func (w *jwriter) BuildBytes() ([]byte, error) { + if w.err != nil { + return nil, w.err + } + + return bytes.Clone(w.buf.Bytes()), nil +} diff --git a/vendor/github.com/go-openapi/swag/jsonutils/concat.go b/vendor/github.com/go-openapi/swag/jsonutils/concat.go index 4c96ec75d..2068503af 100644 --- a/vendor/github.com/go-openapi/swag/jsonutils/concat.go +++ b/vendor/github.com/go-openapi/swag/jsonutils/concat.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package jsonutils @@ -32,7 +21,10 @@ func init() { } } -// ConcatJSON concatenates multiple json objects efficiently +// ConcatJSON concatenates multiple json objects or arrays efficiently. +// +// Note that [ConcatJSON] performs a very simple (and fast) concatenation +// operation: it does not attempt to merge objects. func ConcatJSON(blobs ...[]byte) []byte { if len(blobs) == 0 { return nil diff --git a/vendor/github.com/go-openapi/swag/jsonutils/doc.go b/vendor/github.com/go-openapi/swag/jsonutils/doc.go index 495ef8341..3926cc58d 100644 --- a/vendor/github.com/go-openapi/swag/jsonutils/doc.go +++ b/vendor/github.com/go-openapi/swag/jsonutils/doc.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package jsonutils provides helpers to work with JSON. // diff --git a/vendor/github.com/go-openapi/swag/jsonutils/json.go b/vendor/github.com/go-openapi/swag/jsonutils/json.go index 62a3684ed..40753ce03 100644 --- a/vendor/github.com/go-openapi/swag/jsonutils/json.go +++ b/vendor/github.com/go-openapi/swag/jsonutils/json.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package jsonutils @@ -18,30 +7,44 @@ import ( "bytes" "encoding/json" - "github.com/mailru/easyjson" - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" + "github.com/go-openapi/swag/jsonutils/adapters" + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" ) -type ejMarshaler = easyjson.Marshaler -type ejUnmarshaler = easyjson.Unmarshaler - // WriteJSON marshals a data structure as JSON. // // The difference with [json.Marshal] is that it may check among several alternatives // to do so. 
 //
-// Currently this allows types that are [easyjson.Marshaler]s to use that route to produce JSON.
-func WriteJSON(value interface{}) ([]byte, error) {
-	if d, ok := value.(ejMarshaler); ok {
-		jw := new(jwriter.Writer)
-		d.MarshalEasyJSON(jw)
-		return jw.BuildBytes()
+// See [adapters.Registrar] for more details about how to configure
+// multiple serialization alternatives.
+//
+// NOTE: to allow types that are [easyjson.Marshaler] s to use that route to process JSON,
+// you now need to register the adapter for easyjson at runtime.
+func WriteJSON(value any) ([]byte, error) {
+	if orderedMap, isOrdered := value.(ifaces.Ordered); isOrdered {
+		orderedMarshaler := adapters.OrderedMarshalAdapterFor(orderedMap)
+
+		if orderedMarshaler != nil {
+			defer orderedMarshaler.Redeem()
+
+			return orderedMarshaler.OrderedMarshal(orderedMap)
+		}
+
+		// no support found in registered adapters, fall back to the default (unordered) case
 	}
-	if d, ok := value.(json.Marshaler); ok {
-		return d.MarshalJSON()
+
+	marshaler := adapters.MarshalAdapterFor(value)
+	if marshaler != nil {
+		defer marshaler.Redeem()
+
+		return marshaler.Marshal(value)
 	}
-	return json.Marshal(value)
+
+	// no support found in registered adapters, fall back to the default standard library.
+	//
+	// This only happens when tinkering with the global registry of adapters, since the default handles all the above cases.
+	return json.Marshal(value) // Codecov ignore // this is a safeguard not easily simulated in tests
 }
 
 // ReadJSON unmarshals JSON data into a data structure.
@@ -49,28 +52,61 @@ func WriteJSON(value interface{}) ([]byte, error) {
 // The difference with [json.Unmarshal] is that it may check among several alternatives
 // to do so.
 //
-// Currently this allows types that are [easyjson.Unmarshaler]s to use that route to process JSON.
-func ReadJSON(data []byte, value interface{}) error {
+// See [adapters.Registrar] for more details about how to configure
+// multiple serialization alternatives.
+//
+// NOTE: value must be a pointer.
+//
+// If the provided value implements [ifaces.SetOrdered], it is considered an "ordered map" and [ReadJSON]
+// will favor an adapter that supports the [ifaces.OrderedUnmarshalAdapter] interface, or fall back to
+// an unordered behavior if none is found.
+//
+// NOTE: to allow types that are [easyjson.Unmarshaler] s to use that route to process JSON,
+// you now need to register the adapter for easyjson at runtime.
+func ReadJSON(data []byte, value any) error {
 	trimmedData := bytes.Trim(data, "\x00")
-	if d, ok := value.(ejUnmarshaler); ok {
-		jl := &jlexer.Lexer{Data: trimmedData}
-		d.UnmarshalEasyJSON(jl)
-		return jl.Error()
+
+	if orderedMap, isOrdered := value.(ifaces.SetOrdered); isOrdered {
+		// if the value is an ordered map, favor support for OrderedUnmarshal.
+
+		orderedUnmarshaler := adapters.OrderedUnmarshalAdapterFor(orderedMap)
+
+		if orderedUnmarshaler != nil {
+			defer orderedUnmarshaler.Redeem()
+
+			return orderedUnmarshaler.OrderedUnmarshal(trimmedData, orderedMap)
+		}
+
+		// no support found in registered adapters, fall back to the default (unordered) case
 	}
-	if d, ok := value.(json.Unmarshaler); ok {
-		return d.UnmarshalJSON(trimmedData)
+	unmarshaler := adapters.UnmarshalAdapterFor(value)
+	if unmarshaler != nil {
+		defer unmarshaler.Redeem()
+
+		return unmarshaler.Unmarshal(trimmedData, value)
 	}
-	return json.Unmarshal(trimmedData, value)
+	// no support found in registered adapters, fall back to the default standard library.
+ // + // This only happens when tinkering with the global registry of adapters, since the default handles all the above cases. + return json.Unmarshal(trimmedData, value) // Codecov ignore // this is a safeguard not easily simulated in tests } // FromDynamicJSON turns a go value into a properly JSON typed structure. // -// "Dynamic JSON" refers to what you get when unmarshaling JSON into an untyped interface{}, -// i.e. objects are represented by map[string]interface{}, arrays by []interface{}, and +// "Dynamic JSON" refers to what you get when unmarshaling JSON into an untyped any, +// i.e. objects are represented by map[string]any, arrays by []any, and // all numbers are represented as float64. -func FromDynamicJSON(source, target interface{}) error { +// +// NOTE: target must be a pointer. +// +// # Maintaining the order of keys in objects +// +// If source and target implement [ifaces.Ordered] and [ifaces.SetOrdered] respectively, +// they are considered "ordered maps" and the order of keys is maintained in the +// "jsonification" process. In that case, map[string]any values are replaced by (ordered) [JSONMapSlice] ones. +func FromDynamicJSON(source, target any) error { b, err := WriteJSON(source) if err != nil { return err diff --git a/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go b/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go index e11fd4bfb..38dd3e244 100644 --- a/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go +++ b/vendor/github.com/go-openapi/swag/jsonutils/ordered_map.go @@ -1,199 +1,114 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package jsonutils import ( - "strconv" - "strings" + "iter" - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" + "github.com/go-openapi/swag/jsonutils/adapters" + "github.com/go-openapi/swag/typeutils" ) // JSONMapSlice represents a JSON object, with the order of keys maintained. +// +// It behaves like an ordered map, but keys can't be accessed in constant time. type JSONMapSlice []JSONMapItem -// MarshalJSON renders a [JSONMapSlice] as JSON bytes, preserving the order of keys. -func (s JSONMapSlice) MarshalJSON() ([]byte, error) { - w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} - s.MarshalEasyJSON(w) - - return w.BuildBytes() +// OrderedItems iterates over all (key,value) pairs with the order of keys maintained. +// +// This implements the [ifaces.Ordered] interface, so that [ifaces.Adapter] s know how to marshal +// keys in the desired order. 
+func (s JSONMapSlice) OrderedItems() iter.Seq2[string, any] {
+	return func(yield func(string, any) bool) {
+		for _, item := range s {
+			if !yield(item.Key, item.Value) {
+				return
+			}
+		}
+	}
 }
 
-// MarshalEasyJSON renders a [JSONMapSlice] as JSON bytes, using easyJSON
-func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) {
-	if s == nil {
-		w.RawString("null")
+// SetOrderedItems sets keys in the [JSONMapSlice] object, as presented by
+// the provided iterator.
+//
+// As a special case, if items is nil, this sets the receiver to a nil slice.
+//
+// This implements the [ifaces.SetOrdered] interface, so that [ifaces.Adapter] s know how to unmarshal
+// keys in the desired order.
+func (s *JSONMapSlice) SetOrderedItems(items iter.Seq2[string, any]) {
+	if items == nil {
+		// force receiver to be a nil slice
+		*s = nil
 
 		return
 	}
 
-	w.RawByte('{')
+	m := *s
+	if len(m) > 0 {
+		// update mode: short-circuited when unmarshaling fresh data structures
+		idx := make(map[string]int, len(m))
+
+		for i, item := range m {
+			idx[item.Key] = i
+		}
+
+		for k, v := range items {
+			idx, ok := idx[k]
+			if ok {
+				m[idx].Value = v
+
+				continue
+			}
+			m = append(m, JSONMapItem{Key: k, Value: v})
+		}
-	if len(s) == 0 {
-		w.RawByte('}')
+
+		*s = m
 
 		return
 	}
 
-	s[0].MarshalEasyJSON(w)
-
-	for i := 1; i < len(s); i++ {
-		w.RawByte(',')
-		s[i].MarshalEasyJSON(w)
+	for k, v := range items {
+		m = append(m, JSONMapItem{Key: k, Value: v})
 	}
 
-	w.RawByte('}')
+	*s = m
 }
 
-// UnmarshalJSON builds a [JSONMapSlice] from JSON bytes, preserving the order of keys.
+// MarshalJSON renders a [JSONMapSlice] as JSON bytes, preserving the order of keys.
 //
-// Inner objects are unmarshaled as [JSONMapSlice] slices and not map[string]any.
-func (s *JSONMapSlice) UnmarshalJSON(data []byte) error {
-	l := jlexer.Lexer{Data: data}
-	s.UnmarshalEasyJSON(&l)
+// It will pick the JSON library currently configured by the [adapters.Registry] (defaults to the standard library).
+func (s JSONMapSlice) MarshalJSON() ([]byte, error) {
+	orderedMarshaler := adapters.OrderedMarshalAdapterFor(s)
+	defer orderedMarshaler.Redeem()
 
-	return l.Error()
+	return orderedMarshaler.OrderedMarshal(s)
 }
 
-// UnmarshalEasyJSON builds a [JSONMapSlice] from JSON bytes, using easyJSON
-func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) {
-	if in.IsNull() {
-		in.Skip()
-
-		return
+// UnmarshalJSON builds a [JSONMapSlice] from JSON bytes, preserving the order of keys.
+//
+// Inner objects are unmarshaled as ordered [JSONMapSlice] slices and not map[string]any.
+//
+// It will pick the JSON library currently configured by the [adapters.Registry] (defaults to the standard library).
+func (s *JSONMapSlice) UnmarshalJSON(data []byte) error {
+	if typeutils.IsNil(*s) {
+		// allow unmarshaling from a simple var declaration (nil slice)
+		*s = JSONMapSlice{}
 	}
 
-	result := make(JSONMapSlice, 0)
-	in.Delim('{')
-	for !in.IsDelim('}') {
-		var mi JSONMapItem
-		mi.UnmarshalEasyJSON(in)
-		result = append(result, mi)
-	}
-	in.Delim('}')
+	orderedUnmarshaler := adapters.OrderedUnmarshalAdapterFor(s)
+	defer orderedUnmarshaler.Redeem()
 
-	*s = result
+	return orderedUnmarshaler.OrderedUnmarshal(data, s)
 }
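 
+// A minimal round-trip sketch (assuming the default registry, which uses the
+// standard library adapter):
+//
+//	var m JSONMapSlice
+//	_ = ReadJSON([]byte(`{"b":1,"a":2}`), &m) // ordered: *JSONMapSlice implements ifaces.SetOrdered
+//	b, _ := WriteJSON(m)
+//	// string(b) == `{"b":1,"a":2}`: the order of keys is kept
+
 // JSONMapItem represents the value of a key in a JSON object held by [JSONMapSlice].
 //
-// Notice that JSONMapItem should not be marshaled to or unmarshaled from JSON directly,
-// use this type as part of a [JSONMapSlice] when dealing with JSON bytes.
+// Notice that JSONMapItem should not be marshaled to or unmarshaled from JSON directly.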
+// +// Use this type as part of a [JSONMapSlice] when dealing with JSON bytes. type JSONMapItem struct { Key string Value any } - -// MarshalEasyJSON renders a [JSONMapItem] as JSON bytes, using easyJSON -func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) { - w.String(s.Key) - w.RawByte(':') - w.Raw(WriteJSON(s.Value)) -} - -// UnmarshalEasyJSON builds a [JSONMapItem] from JSON bytes, using easyJSON -func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { - key := in.UnsafeString() - in.WantColon() - value := s.asInterface(in) - in.WantComma() - - s.Key = key - s.Value = value -} - -// asInterface is very much like [jlexer.Lexer.Interface], but unmarshals an object -// into a [JSONMapSlice], not a map[string]any. -// -// We have to force parsing errors somehow, since [jlexer.Lexer] doesn't let us -// set a parsing error directly. -func (s *JSONMapItem) asInterface(in *jlexer.Lexer) any { - tokenKind := in.CurrentToken() - - if !in.Ok() { - return nil - } - - switch tokenKind { - case jlexer.TokenString: - return in.String() - - case jlexer.TokenNumber: - // determine if we may use an integer type - n := in.JsonNumber().String() - if strings.ContainsRune(n, '.') { - f, _ := strconv.ParseFloat(n, 64) - return f - } - - i, _ := strconv.ParseInt(n, 10, 64) - return i - - case jlexer.TokenBool: - return in.Bool() - - case jlexer.TokenNull: - in.Null() - return nil - - case jlexer.TokenDelim: - if in.IsDelim('{') { - ret := make(JSONMapSlice, 0) - ret.UnmarshalEasyJSON(in) - - if in.Ok() { - return ret - } - - // lexer is in an error state: will exhaust - return nil - } - - if in.IsDelim('[') { - in.Delim('[') // consume - - ret := []interface{}{} - for !in.IsDelim(']') { - ret = append(ret, s.asInterface(in)) - in.WantComma() - } - in.Delim(']') - - if in.Ok() { - return ret - } - - // lexer is in an error state: will exhaust - return nil - } - - if in.Ok() { - in.Delim('{') // force error - } - - return nil - - case jlexer.TokenUndef: - fallthrough - default: - if in.Ok() { - in.Delim('{') // force error - } - - return nil - } -} diff --git a/vendor/github.com/go-openapi/swag/jsonutils_iface.go b/vendor/github.com/go-openapi/swag/jsonutils_iface.go index 63e23f0b6..7bd4105fa 100644 --- a/vendor/github.com/go-openapi/swag/jsonutils_iface.go +++ b/vendor/github.com/go-openapi/swag/jsonutils_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag @@ -33,17 +22,17 @@ type JSONMapItem = jsonutils.JSONMapItem // WriteJSON writes json data. // // Deprecated: use [jsonutils.WriteJSON] instead. -func WriteJSON(data interface{}) ([]byte, error) { return jsonutils.WriteJSON(data) } +func WriteJSON(data any) ([]byte, error) { return jsonutils.WriteJSON(data) } // ReadJSON reads json data. // // Deprecated: use [jsonutils.ReadJSON] instead. 
-func ReadJSON(data []byte, value interface{}) error { return jsonutils.ReadJSON(data, value) } +func ReadJSON(data []byte, value any) error { return jsonutils.ReadJSON(data, value) } // DynamicJSONToStruct converts an untyped JSON structure into a target data type. // // Deprecated: use [jsonutils.FromDynamicJSON] instead. -func DynamicJSONToStruct(data interface{}, target interface{}) error { +func DynamicJSONToStruct(data any, target any) error { return jsonutils.FromDynamicJSON(data, target) } @@ -57,8 +46,8 @@ func ConcatJSON(blobs ...[]byte) []byte { return jsonutils.ConcatJSON(blobs...) // It is the same as [FromDynamicJSON], but doesn't check for errors. // // Deprecated: this function is a misnomer and is unsafe. Use [jsonutils.FromDynamicJSON] instead. -func ToDynamicJSON(value interface{}) interface{} { - var res interface{} +func ToDynamicJSON(value any) any { + var res any if err := FromDynamicJSON(value, &res); err != nil { log.Println(err) } @@ -68,9 +57,9 @@ func ToDynamicJSON(value interface{}) interface{} { // FromDynamicJSON turns a go value into a properly JSON typed structure. // -// "Dynamic JSON" refers to what you get when unmarshaling JSON into an untyped interface{}, -// i.e. objects are represented by map[string]interface{}, arrays by []interface{}, and all -// scalar values are interface{}. +// "Dynamic JSON" refers to what you get when unmarshaling JSON into an untyped any, +// i.e. objects are represented by map[string]any, arrays by []any, and all +// scalar values are any. // // Deprecated: use [jsonutils.FromDynamicJSON] instead. -func FromDynamicJSON(data, target interface{}) error { return jsonutils.FromDynamicJSON(data, target) } +func FromDynamicJSON(data, target any) error { return jsonutils.FromDynamicJSON(data, target) } diff --git a/vendor/github.com/go-openapi/swag/loading/doc.go b/vendor/github.com/go-openapi/swag/loading/doc.go index 62585615e..8cf7bcb8b 100644 --- a/vendor/github.com/go-openapi/swag/loading/doc.go +++ b/vendor/github.com/go-openapi/swag/loading/doc.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package loading provides tools to load a file from http or from a local file system. package loading diff --git a/vendor/github.com/go-openapi/swag/loading/errors.go b/vendor/github.com/go-openapi/swag/loading/errors.go index ca45732a7..b3964289c 100644 --- a/vendor/github.com/go-openapi/swag/loading/errors.go +++ b/vendor/github.com/go-openapi/swag/loading/errors.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package loading diff --git a/vendor/github.com/go-openapi/swag/loading/json.go b/vendor/github.com/go-openapi/swag/loading/json.go index aadf99913..59db12f5c 100644 --- a/vendor/github.com/go-openapi/swag/loading/json.go +++ b/vendor/github.com/go-openapi/swag/loading/json.go @@ -1,3 +1,6 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + package loading import ( diff --git a/vendor/github.com/go-openapi/swag/loading/loading.go b/vendor/github.com/go-openapi/swag/loading/loading.go index bd955535f..269fb74d1 100644 --- a/vendor/github.com/go-openapi/swag/loading/loading.go +++ b/vendor/github.com/go-openapi/swag/loading/loading.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package loading diff --git a/vendor/github.com/go-openapi/swag/loading/options.go b/vendor/github.com/go-openapi/swag/loading/options.go index a51329e93..6674ac69e 100644 --- a/vendor/github.com/go-openapi/swag/loading/options.go +++ b/vendor/github.com/go-openapi/swag/loading/options.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package loading @@ -81,7 +70,7 @@ func WithCustomHeaders(headers map[string]string) Option { } } -// WithHTTClient overrides the default HTTP client used to fetch a remote file. +// WithHTTPClient overrides the default HTTP client used to fetch a remote file. // // By default, [http.DefaultClient] is used. func WithHTTPClient(client *http.Client) Option { @@ -90,7 +79,7 @@ func WithHTTPClient(client *http.Client) Option { } } -// WithFileFS sets a file system for the local file loader. +// WithFS sets a file system for the local file loader. 
// // If the provided file system is a [fs.ReadFileFS], the ReadFile function is used. // Otherwise, ReadFile is wrapped using [fs.ReadFile]. diff --git a/vendor/github.com/go-openapi/swag/loading/yaml.go b/vendor/github.com/go-openapi/swag/loading/yaml.go index 40bd2a769..3ebb53668 100644 --- a/vendor/github.com/go-openapi/swag/loading/yaml.go +++ b/vendor/github.com/go-openapi/swag/loading/yaml.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package loading @@ -38,7 +27,7 @@ func YAMLDoc(path string, opts ...Option) (json.RawMessage, error) { } // YAMLData loads a yaml document from either http or a file. -func YAMLData(path string, opts ...Option) (interface{}, error) { +func YAMLData(path string, opts ...Option) (any, error) { data, err := LoadFromFileOrHTTP(path, opts...) if err != nil { return nil, err diff --git a/vendor/github.com/go-openapi/swag/loading_iface.go b/vendor/github.com/go-openapi/swag/loading_iface.go index 38d825bc5..27ec3fb8c 100644 --- a/vendor/github.com/go-openapi/swag/loading_iface.go +++ b/vendor/github.com/go-openapi/swag/loading_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag @@ -84,7 +73,7 @@ func YAMLDoc(path string) (json.RawMessage, error) { // YAMLData loads a yaml document from either http or a file. // // Deprecated: use [loading.YAMLData] instead. -func YAMLData(path string) (interface{}, error) { +func YAMLData(path string) (any, error) { return loading.YAMLData(path) } diff --git a/vendor/github.com/go-openapi/swag/mangling/doc.go b/vendor/github.com/go-openapi/swag/mangling/doc.go index dbe806828..ce0d89048 100644 --- a/vendor/github.com/go-openapi/swag/mangling/doc.go +++ b/vendor/github.com/go-openapi/swag/mangling/doc.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package mangling provides name mangling capabilities. // diff --git a/vendor/github.com/go-openapi/swag/mangling/initialism_index.go b/vendor/github.com/go-openapi/swag/mangling/initialism_index.go index cf0786f81..e5b70c149 100644 --- a/vendor/github.com/go-openapi/swag/mangling/initialism_index.go +++ b/vendor/github.com/go-openapi/swag/mangling/initialism_index.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package mangling @@ -190,6 +179,19 @@ const ( simplePlural ) +func (f pluralForm) String() string { + switch f { + case notPlural: + return "notPlural" + case invariantPlural: + return "invariantPlural" + case simplePlural: + return "simplePlural" + default: + return "" + } +} + // pluralForm indicates how we want to pluralize a given initialism. // // Besides configured invariant forms (like HTTP and HTTPS), diff --git a/vendor/github.com/go-openapi/swag/mangling/name_lexem.go b/vendor/github.com/go-openapi/swag/mangling/name_lexem.go index 02004b4f3..bc837e3b9 100644 --- a/vendor/github.com/go-openapi/swag/mangling/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/mangling/name_lexem.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package mangling diff --git a/vendor/github.com/go-openapi/swag/mangling/name_mangler.go b/vendor/github.com/go-openapi/swag/mangling/name_mangler.go index 94ae555a7..da685681d 100644 --- a/vendor/github.com/go-openapi/swag/mangling/name_mangler.go +++ b/vendor/github.com/go-openapi/swag/mangling/name_mangler.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package mangling diff --git a/vendor/github.com/go-openapi/swag/mangling/options.go b/vendor/github.com/go-openapi/swag/mangling/options.go index 66ad2e46c..3c92b2f18 100644 --- a/vendor/github.com/go-openapi/swag/mangling/options.go +++ b/vendor/github.com/go-openapi/swag/mangling/options.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package mangling diff --git a/vendor/github.com/go-openapi/swag/mangling/pools.go b/vendor/github.com/go-openapi/swag/mangling/pools.go index d85b40387..f81043514 100644 --- a/vendor/github.com/go-openapi/swag/mangling/pools.go +++ b/vendor/github.com/go-openapi/swag/mangling/pools.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
 
 package mangling
diff --git a/vendor/github.com/go-openapi/swag/mangling/split.go b/vendor/github.com/go-openapi/swag/mangling/split.go
index 40e4a2e0e..ed12ea256 100644
--- a/vendor/github.com/go-openapi/swag/mangling/split.go
+++ b/vendor/github.com/go-openapi/swag/mangling/split.go
@@ -1,20 +1,10 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers
+// SPDX-License-Identifier: Apache-2.0
 
 package mangling
 
 import (
+	"fmt"
 	"unicode"
 )
 
@@ -47,6 +37,13 @@
 	initialismMatches []initialismMatch
 )
 
+// String representation of a match, e.g. for debugging.
+func (m initialismMatch) String() string {
+	return fmt.Sprintf("{body: %s (%d), start: %d, end: %d, complete: %t, hasPlural: %v}",
+		string(m.body), len(m.body), m.start, m.end, m.complete, m.hasPlural,
+	)
+}
+
 func (m initialismMatch) isZero() bool {
 	return m.start == 0 && m.end == 0
 }
@@ -83,116 +80,148 @@ func (s splitter) split(name string) *[]nameLexem {
 }
 
 func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches {
-	var matches *initialismMatches
+	matches := poolOfMatches.BorrowMatches()
+	const minLenInitialism = 1
+	if len(nameRunes) < minLenInitialism+1 {
+		// can't match initialism with 0 or 1 rune
+		return matches
+	}
+
+	// first iteration
+	s.findMatches(matches, nameRunes, nameRunes[0], 0)
 
-	for currentRunePosition, currentRune := range nameRunes {
-		// recycle these allocations as we loop over runes
+	for i, currentRune := range nameRunes[1:] {
+		currentRunePosition := i + 1
+		// recycle allocations as we loop over runes
 		// with such recycling, only 2 slices should be allocated per call
 		// instead of o(n).
+		//
+		// BorrowMatches always yields slices with zero length (with some capacity)
 		newMatches := poolOfMatches.BorrowMatches()
 
 		// check current initialism matches
-		if matches != nil { // skip first iteration
-			for _, match := range *matches {
-				if keepCompleteMatch := match.complete; keepCompleteMatch {
-					*newMatches = append(*newMatches, match)
-
-					// the match is complete: keep it then move on to next rune
-					continue
-				}
+		for _, match := range *matches {
+			if keepCompleteMatch := match.complete; keepCompleteMatch {
+				// the match is already complete: keep it then move on to the next match
+				*newMatches = append(*newMatches, match)
+				continue
+			}
 
-				currentMatchRune := match.body[currentRunePosition-match.start]
-				if currentMatchRune != currentRune {
-					// failed match, move on to next rune
-					continue
-				}
+			if currentRunePosition-match.start == len(match.body) {
+				// unmatched: skip
+				continue
+			}
 
-				// try to complete ongoing match
-				if currentRunePosition-match.start == len(match.body)-1 {
-					// we are close; the next step is to check the symbol ahead
-					// if it is a lowercase letter, then it is not the end of match
-					// but the beginning of the next word.
-					//
-					// NOTE(fredbi): this heuristic sometimes leads to counterintuitive splits and
-					// perhaps (not sure yet) we should check against case _alternance_.
-					//
-					// Example:
-					//
-					// In the current version, in the sentence "IDS initialism", "ID" is recognized as an initialism,
-					// leading to a split like "id_s_initialism" (or IDSInitialism),
-					// whereas in the sentence "IDx initialism", it is not and produces something like
-					// "i_d_x_initialism" (or IDxInitialism). The generated file name is not great.
-					//
-					// Both go identifiers are tolerated by linters.
-					//
-					// Notice that the slightly different input "IDs initialism" is correctly detected
-					// as a pluralized initialism and produces something like "ids_initialism" (or IDsInitialism).
-
-					if currentRunePosition < len(nameRunes)-1 {
-						nextRune := nameRunes[currentRunePosition+1]
-
-						// recognize a plural form for this initialism (only simple pluralization is supported)
-						if nextRune == 's' && match.hasPlural == simplePlural {
-							// detected a pluralized initialism
-							match.body = append(match.body, nextRune)
-							currentRunePosition++
-							if currentRunePosition < len(nameRunes)-1 {
-								nextRune = nameRunes[currentRunePosition+1]
-								if newWord := unicode.IsLower(nextRune); newWord {
-									// it is the start of a new word.
-									// Match is only partial and the initialism is not recognized: move on
-									continue
-								}
-							}
+			// 1. by construction of the matches, we can't have currentRunePosition - match.start < 0
+			// because matches have been computed with their start <= currentRunePosition in the previous
+			// iterations.
+			// 2. by construction of the matches, we can't have currentRunePosition - match.start >= len(match.body)
 
-						// this is a pluralized match: keep it
-						match.complete = true
-						match.hasPlural = simplePlural
-						match.end = currentRunePosition
-						*newMatches = append(*newMatches, match)
+			currentMatchRune := match.body[currentRunePosition-match.start]
+			if currentMatchRune != currentRune {
+				// failed match, discard it then move on to the next match
+				continue
+			}
 
-						// match is complete: keep it then move on to next rune
-						continue
+			// try to complete the current match
+			if currentRunePosition-match.start == len(match.body)-1 {
+				// we are close: the next step is to check the symbol ahead
+				// if it is a lowercase letter, then it is not the end of match
+				// but the beginning of the next word.
+				//
+				// NOTE(fredbi): this heuristic sometimes leads to counterintuitive splits and
+				// perhaps (not sure yet) we should check against case _alternance_.
+				//
+				// Example:
+				//
+				// In the current version, in the sentence "IDS initialism", "ID" is recognized as an initialism,
+				// leading to a split like "id_s_initialism" (or IDSInitialism),
+				// whereas in the sentence "IDx initialism", it is not and produces something like
+				// "i_d_x_initialism" (or IDxInitialism). The generated file name is not great.
+				//
+				// Both go identifiers are tolerated by linters.
+				//
+				// Notice that the slightly different input "IDs initialism" is correctly detected
+				// as a pluralized initialism and produces something like "ids_initialism" (or IDsInitialism).
+
+				if currentRunePosition < len(nameRunes)-1 { // when before the last rune
+					nextRune := nameRunes[currentRunePosition+1]
+
+					// recognize a plural form for this initialism (only simple English pluralization is supported).
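+					//
+					// Illustrative example (added for clarity, not upstream text): with the
+					// default initialisms, "APIsByName" matches "API", then the check below
+					// consumes the trailing 's' to yield the pluralized initialism "APIs",
+					// followed by the words "By" and "Name".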
+					if nextRune == 's' && match.hasPlural == simplePlural {
+						// detected a pluralized initialism
+						match.body = append(match.body, nextRune)
+						lookAhead := currentRunePosition + 1
+						if lookAhead < len(nameRunes)-1 {
+							nextRune = nameRunes[lookAhead+1]
+							if newWord := unicode.IsLower(nextRune); newWord {
+								// it is the start of a new word.
+								// Match is only partial and the initialism is not recognized:
+								// move on to the next match, but do not advance the rune position
+								continue
+							}
+						}
 
-							if newWord := unicode.IsLower(nextRune); newWord {
-								// it is the start of a new word
-								// Match is only partial and the initialism is not recognized: move on
-								continue
-							}
+						// this is a pluralized match: keep it
+						currentRunePosition++
+						match.complete = true
+						match.hasPlural = simplePlural
+						match.end = currentRunePosition
+						*newMatches = append(*newMatches, match)
+
+						// match is complete: keep it then move on to the next match
+						continue
 					}
 
-					match.complete = true
-					match.end = currentRunePosition
+					// other cases
+					// example: invariant plural such as "TLS"
+					if newWord := unicode.IsLower(nextRune); newWord {
+						// it is the start of a new word
+						// Match is only partial and the initialism is not recognized: move on
+						continue
+					}
 				}
 
-				// append the ongoing matching attempt (not necessarily complete)
-				*newMatches = append(*newMatches, match)
+				match.complete = true
+				match.end = currentRunePosition
 			}
-		}
 
-		// check for new initialism matches
-		for i, r := range s.initialismsRunes {
-			if r[0] == currentRune {
-				*newMatches = append(*newMatches, initialismMatch{
-					start:     currentRunePosition,
-					body:      r,
-					complete:  false,
-					hasPlural: s.initialismsPluralForm[i],
-				})
-			}
+			// append the ongoing matching attempt: it is not necessarily complete, but was successful so far.
+			// Let's see if it still matches on the next rune.
+			*newMatches = append(*newMatches, match)
 		}
 
-		if matches != nil {
-			poolOfMatches.RedeemMatches(matches)
-		}
+		s.findMatches(newMatches, nameRunes, currentRune, currentRunePosition)
+
+		poolOfMatches.RedeemMatches(matches)
 		matches = newMatches
 	}
 
-	// up to the caller to redeem this last slice
+	// it is up to the caller to redeem this last slice
 	return matches
 }
 
+func (s splitter) findMatches(newMatches *initialismMatches, nameRunes []rune, currentRune rune, currentRunePosition int) {
+	// check for new initialism matches, based on the first character
+	for i, r := range s.initialismsRunes {
+		if r[0] != currentRune {
+			continue
+		}
+
+		if currentRunePosition+len(r) > len(nameRunes) {
+			continue // not eligible: would spill over the initial string
+		}
+
+		// possible matches: all initialisms starting with the current rune and that can fit the given string (nameRunes)
+		*newMatches = append(*newMatches, initialismMatch{
+			start:     currentRunePosition,
+			body:      r,
+			complete:  false,
+			hasPlural: s.initialismsPluralForm[i],
+		})
+	}
+}
+
 func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem {
 	nameLexems := poolOfLexems.BorrowLexems()
diff --git a/vendor/github.com/go-openapi/swag/mangling/string_bytes.go b/vendor/github.com/go-openapi/swag/mangling/string_bytes.go
index 06351434d..28daaf72b 100644
--- a/vendor/github.com/go-openapi/swag/mangling/string_bytes.go
+++ b/vendor/github.com/go-openapi/swag/mangling/string_bytes.go
@@ -1,16 +1,5 @@
-// Copyright 2015 go-swagger maintainers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package mangling diff --git a/vendor/github.com/go-openapi/swag/mangling/util.go b/vendor/github.com/go-openapi/swag/mangling/util.go index c289dc6bd..0636417e3 100644 --- a/vendor/github.com/go-openapi/swag/mangling/util.go +++ b/vendor/github.com/go-openapi/swag/mangling/util.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package mangling diff --git a/vendor/github.com/go-openapi/swag/mangling_iface.go b/vendor/github.com/go-openapi/swag/mangling_iface.go index 2d0d07ddb..98b9a9992 100644 --- a/vendor/github.com/go-openapi/swag/mangling_iface.go +++ b/vendor/github.com/go-openapi/swag/mangling_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag diff --git a/vendor/github.com/go-openapi/swag/netutils/doc.go b/vendor/github.com/go-openapi/swag/netutils/doc.go index ed6d8a022..74282f8e5 100644 --- a/vendor/github.com/go-openapi/swag/netutils/doc.go +++ b/vendor/github.com/go-openapi/swag/netutils/doc.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package netutils provides helpers for network-related tasks. package netutils diff --git a/vendor/github.com/go-openapi/swag/netutils/net.go b/vendor/github.com/go-openapi/swag/netutils/net.go index 3d0182fc5..82a1544af 100644 --- a/vendor/github.com/go-openapi/swag/netutils/net.go +++ b/vendor/github.com/go-openapi/swag/netutils/net.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package netutils diff --git a/vendor/github.com/go-openapi/swag/netutils_iface.go b/vendor/github.com/go-openapi/swag/netutils_iface.go index 537314e36..d658de25b 100644 --- a/vendor/github.com/go-openapi/swag/netutils_iface.go +++ b/vendor/github.com/go-openapi/swag/netutils_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag diff --git a/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go b/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go index 1ff96dcbd..28056ad25 100644 --- a/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go +++ b/vendor/github.com/go-openapi/swag/stringutils/collection_formats.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package stringutils diff --git a/vendor/github.com/go-openapi/swag/stringutils/doc.go b/vendor/github.com/go-openapi/swag/stringutils/doc.go index b5d18e517..c6d17a116 100644 --- a/vendor/github.com/go-openapi/swag/stringutils/doc.go +++ b/vendor/github.com/go-openapi/swag/stringutils/doc.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package stringutils exposes helpers to search and process strings. package stringutils diff --git a/vendor/github.com/go-openapi/swag/stringutils/strings.go b/vendor/github.com/go-openapi/swag/stringutils/strings.go index 086592317..cd792b7d0 100644 --- a/vendor/github.com/go-openapi/swag/stringutils/strings.go +++ b/vendor/github.com/go-openapi/swag/stringutils/strings.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package stringutils diff --git a/vendor/github.com/go-openapi/swag/stringutils_iface.go b/vendor/github.com/go-openapi/swag/stringutils_iface.go index 00d7e0212..dbfa48484 100644 --- a/vendor/github.com/go-openapi/swag/stringutils_iface.go +++ b/vendor/github.com/go-openapi/swag/stringutils_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
+// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag diff --git a/vendor/github.com/go-openapi/swag/typeutils/doc.go b/vendor/github.com/go-openapi/swag/typeutils/doc.go index 67e49d12e..66bed20df 100644 --- a/vendor/github.com/go-openapi/swag/typeutils/doc.go +++ b/vendor/github.com/go-openapi/swag/typeutils/doc.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package typeutils exposes utilities to inspect generic types. package typeutils diff --git a/vendor/github.com/go-openapi/swag/typeutils/types.go b/vendor/github.com/go-openapi/swag/typeutils/types.go index 8a29aa9c2..55487a673 100644 --- a/vendor/github.com/go-openapi/swag/typeutils/types.go +++ b/vendor/github.com/go-openapi/swag/typeutils/types.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package typeutils @@ -22,11 +11,18 @@ type zeroable interface { // IsZero returns true when the value passed into the function is a zero value. // This allows for safer checking of interface values. -func IsZero(data interface{}) bool { +func IsZero(data any) bool { v := reflect.ValueOf(data) // check for nil data switch v.Kind() { //nolint:exhaustive - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + case + reflect.Interface, + reflect.Func, + reflect.Chan, + reflect.Pointer, + reflect.UnsafePointer, + reflect.Map, + reflect.Slice: if v.IsNil() { return true } @@ -57,3 +53,28 @@ func IsZero(data interface{}) bool { return false } } + +// IsNil checks if input is nil. +// +// For types chan, func, interface, map, pointer, or slice it returns true if its argument is nil. +// +// See [reflect.Value.IsNil]. 
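+//
+// Illustrative behavior (example added here, not part of the vendored file):
+//
+//	var p *int
+//	IsNil(p)   // true: typed nil pointer inside the interface argument
+//	IsNil(nil) // true: untyped nil
+//	IsNil(0)   // false: int is not a nillable kind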
+func IsNil(input any) bool { + if input == nil { + return true + } + + kind := reflect.TypeOf(input).Kind() + switch kind { //nolint:exhaustive + case reflect.Pointer, + reflect.UnsafePointer, + reflect.Map, + reflect.Slice, + reflect.Chan, + reflect.Interface, + reflect.Func: + return reflect.ValueOf(input).IsNil() + default: + return false + } +} diff --git a/vendor/github.com/go-openapi/swag/typeutils_iface.go b/vendor/github.com/go-openapi/swag/typeutils_iface.go index b104a8040..b63813ea4 100644 --- a/vendor/github.com/go-openapi/swag/typeutils_iface.go +++ b/vendor/github.com/go-openapi/swag/typeutils_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag @@ -20,4 +9,4 @@ import "github.com/go-openapi/swag/typeutils" // This allows for safer checking of interface values. // // Deprecated: use [typeutils.IsZero] instead. -func IsZero(data interface{}) bool { return typeutils.IsZero(data) } +func IsZero(data any) bool { return typeutils.IsZero(data) } diff --git a/vendor/github.com/go-openapi/swag/yamlutils/doc.go b/vendor/github.com/go-openapi/swag/yamlutils/doc.go index c8454c95f..7bb92a82f 100644 --- a/vendor/github.com/go-openapi/swag/yamlutils/doc.go +++ b/vendor/github.com/go-openapi/swag/yamlutils/doc.go @@ -1,16 +1,13 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 // Package yamlutils provides utilities to work with YAML documents. +// +// - [BytesToYAMLDoc] to construct a [yaml.Node] document +// - [YAMLToJSON] to convert a [yaml.Node] document to JSON bytes +// - [YAMLMapSlice] to serialize and deserialize YAML with the order of keys maintained package yamlutils + +import ( + _ "go.yaml.in/yaml/v3" // for documentation purpose only +) diff --git a/vendor/github.com/go-openapi/swag/yamlutils/errors.go b/vendor/github.com/go-openapi/swag/yamlutils/errors.go index 014f227d9..e87bc5e8b 100644 --- a/vendor/github.com/go-openapi/swag/yamlutils/errors.go +++ b/vendor/github.com/go-openapi/swag/yamlutils/errors.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package yamlutils diff --git a/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go b/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go index c12bcc19b..3daf68dbb 100644 --- a/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go +++ b/vendor/github.com/go-openapi/swag/yamlutils/ordered_map.go @@ -1,23 +1,32 @@ +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 + package yamlutils import ( "fmt" - "reflect" + "iter" + "slices" "sort" "strconv" + "github.com/go-openapi/swag/conv" "github.com/go-openapi/swag/jsonutils" - yaml "gopkg.in/yaml.v3" + "github.com/go-openapi/swag/jsonutils/adapters/ifaces" + "github.com/go-openapi/swag/typeutils" + yaml "go.yaml.in/yaml/v3" ) var ( - _ yaml.Marshaler = YAMLMapSlice{} - // _ yaml.Unmarshaler = &YAMLMapSlice{} // TODO: implement yaml.Unmarshaler + _ yaml.Marshaler = YAMLMapSlice{} + _ yaml.Unmarshaler = &YAMLMapSlice{} ) // YAMLMapSlice represents a YAML object, with the order of keys maintained. // // It is similar to [jsonutils.JSONMapSlice] and also knows how to marshal and unmarshal YAML. +// +// It behaves like an ordered map, but keys can't be accessed in constant time. type YAMLMapSlice []YAMLMapItem // YAMLMapItem represents the value of a key in a YAML object held by [YAMLMapSlice]. @@ -26,12 +35,68 @@ type YAMLMapSlice []YAMLMapItem // you should not Marshal or Unmarshal directly this type, outside of a [YAMLMapSlice]. type YAMLMapItem = jsonutils.JSONMapItem +func (s YAMLMapSlice) OrderedItems() iter.Seq2[string, any] { + return func(yield func(string, any) bool) { + for _, item := range s { + if !yield(item.Key, item.Value) { + return + } + } + } +} + +// SetOrderedItems implements [ifaces.SetOrdered]: it merges keys passed by the iterator argument +// into the [YAMLMapSlice]. +func (s *YAMLMapSlice) SetOrderedItems(items iter.Seq2[string, any]) { + if items == nil { + // force receiver to be a nil slice + *s = nil + + return + } + + m := *s + if len(m) > 0 { + // update mode: short-circuited when unmarshaling fresh data structures + idx := make(map[string]int, len(m)) + + for i, item := range m { + idx[item.Key] = i + } + + for k, v := range items { + idx, ok := idx[k] + if ok { + m[idx].Value = v + + continue + } + + m = append(m, YAMLMapItem{Key: k, Value: v}) + } + + *s = m + + return + } + + for k, v := range items { + m = append(m, YAMLMapItem{Key: k, Value: v}) + } + + *s = m +} + // MarshalJSON renders this YAML object as JSON bytes. +// +// The difference with standard JSON marshaling is that the order of keys is maintained. func (s YAMLMapSlice) MarshalJSON() ([]byte, error) { return jsonutils.JSONMapSlice(s).MarshalJSON() } // UnmarshalJSON builds this YAML object from JSON bytes. +// +// The difference with standard JSON marshaling is that the order of keys is maintained. 
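+//
+// Sketch of the intended round-trip (illustrative, not from the vendored doc):
+//
+//	var ms YAMLMapSlice
+//	_ = ms.UnmarshalJSON([]byte(`{"b": 1, "a": 2}`))
+//	out, _ := ms.MarshalJSON() // keys keep their source order: {"b":1,"a":2}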
func (s *YAMLMapSlice) UnmarshalJSON(data []byte) error { js := jsonutils.JSONMapSlice(*s) @@ -45,7 +110,14 @@ func (s *YAMLMapSlice) UnmarshalJSON(data []byte) error { } // MarshalYAML produces a YAML document as bytes -func (s YAMLMapSlice) MarshalYAML() (interface{}, error) { +// +// The difference with standard YAML marshaling is that the order of keys is maintained. +// +// It implements [yaml.Marshaler]. +func (s YAMLMapSlice) MarshalYAML() (any, error) { + if typeutils.IsNil(s) { + return []byte("null\n"), nil + } var n yaml.Node n.Kind = yaml.DocumentNode var nodes []*yaml.Node @@ -77,30 +149,45 @@ func (s YAMLMapSlice) MarshalYAML() (interface{}, error) { return yaml.Marshal(&n) } -/* // UnmarshalYAML builds a YAMLMapSlice object from a YAML document [yaml.Node]. -func (s *YAMLMapSlice) UnmarshalYAML(value *yaml.Node) error { - panic("not implemented") +// +// It implements [yaml.Unmarshaler]. +func (s *YAMLMapSlice) UnmarshalYAML(node *yaml.Node) error { + if typeutils.IsNil(*s) { + // allow to unmarshal with a simple var declaration (nil slice) + *s = YAMLMapSlice{} + } + if node == nil { + *s = nil + return nil + } - return nil -} -*/ + const sensibleAllocDivider = 2 + m := slices.Grow(*s, len(node.Content)/sensibleAllocDivider) + m = m[:0] -func isNil(input interface{}) bool { - if input == nil { - return true - } - kind := reflect.TypeOf(input).Kind() - switch kind { //nolint:exhaustive - case reflect.Ptr, reflect.Map, reflect.Slice, reflect.Chan: - return reflect.ValueOf(input).IsNil() - default: - return false + for i := 0; i < len(node.Content); i += 2 { + var nmi YAMLMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return fmt.Errorf("unable to decode YAML map key: %w: %w", err, ErrYAML) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return fmt.Errorf("unable to process YAML map value for key %q: %w: %w", k, err, ErrYAML) + } + nmi.Value = v + m = append(m, nmi) } + + *s = m + + return nil } -func json2yaml(item interface{}) (*yaml.Node, error) { - if isNil(item) { +func json2yaml(item any) (*yaml.Node, error) { + if typeutils.IsNil(item) { return &yaml.Node{ Kind: yaml.ScalarNode, Value: "null", @@ -108,39 +195,10 @@ func json2yaml(item interface{}) (*yaml.Node, error) { } switch val := item.(type) { - case YAMLMapSlice: - var n yaml.Node - n.Kind = yaml.MappingNode - for i := range val { - childNode, err := json2yaml(val[i].Value) - if err != nil { - return nil, err - } - n.Content = append(n.Content, &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlStringScalar, - Value: val[i].Key, - }, childNode) - } - return &n, nil - - case jsonutils.JSONMapSlice: - var n yaml.Node - n.Kind = yaml.MappingNode - for i := range val { - childNode, err := json2yaml(val[i].Value) - if err != nil { - return nil, err - } - n.Content = append(n.Content, &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlStringScalar, - Value: val[i].Key, - }, childNode) - } - return &n, nil + case ifaces.Ordered: + return orderedYAML(val) - case map[string]interface{}: + case map[string]any: var n yaml.Node n.Kind = yaml.MappingNode keys := make([]string, 0, len(val)) @@ -163,7 +221,7 @@ func json2yaml(item interface{}) (*yaml.Node, error) { } return &n, nil - case []interface{}: + case []any: var n yaml.Node n.Kind = yaml.SequenceNode for i := range val { @@ -180,24 +238,30 @@ func json2yaml(item interface{}) (*yaml.Node, error) { Tag: yamlStringScalar, Value: val, }, nil + case float32: + return floatNode(val) case float64: - return &yaml.Node{ - Kind: 
yaml.ScalarNode, - Tag: yamlFloatScalar, - Value: strconv.FormatFloat(val, 'f', -1, 64), - }, nil + return floatNode(val) + case int: + return integerNode(val) + case int8: + return integerNode(val) + case int16: + return integerNode(val) + case int32: + return integerNode(val) case int64: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlIntScalar, - Value: strconv.FormatInt(val, 10), - }, nil + return integerNode(val) + case uint: + return uintegerNode(val) + case uint8: + return uintegerNode(val) + case uint16: + return uintegerNode(val) + case uint32: + return uintegerNode(val) case uint64: - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: yamlIntScalar, - Value: strconv.FormatUint(val, 10), - }, nil + return uintegerNode(val) case bool: return &yaml.Node{ Kind: yaml.ScalarNode, @@ -208,3 +272,45 @@ func json2yaml(item interface{}) (*yaml.Node, error) { return nil, fmt.Errorf("unhandled type: %T: %w", val, ErrYAML) } } + +func floatNode[T conv.Float](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: conv.FormatFloat(val), + }, nil +} + +func integerNode[T conv.Signed](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: conv.FormatInteger(val), + }, nil +} + +func uintegerNode[T conv.Unsigned](val T) (*yaml.Node, error) { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: conv.FormatUinteger(val), + }, nil +} + +func orderedYAML[T ifaces.Ordered](val T) (*yaml.Node, error) { + var n yaml.Node + n.Kind = yaml.MappingNode + for key, value := range val.OrderedItems() { + childNode, err := json2yaml(value) + if err != nil { + return nil, err + } + + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: key, + }, childNode) + } + return &n, nil +} diff --git a/vendor/github.com/go-openapi/swag/yamlutils/yaml.go b/vendor/github.com/go-openapi/swag/yamlutils/yaml.go index c7e88255f..e3aff3c2f 100644 --- a/vendor/github.com/go-openapi/swag/yamlutils/yaml.go +++ b/vendor/github.com/go-openapi/swag/yamlutils/yaml.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package yamlutils @@ -20,13 +9,15 @@ import ( "strconv" "github.com/go-openapi/swag/jsonutils" - yaml "gopkg.in/yaml.v3" + yaml "go.yaml.in/yaml/v3" ) // YAMLToJSON converts a YAML document into JSON bytes. // // Note: a YAML document is the output from a [yaml.Marshaler], e.g a pointer to a [yaml.Node]. -func YAMLToJSON(value interface{}) (json.RawMessage, error) { +// +// [YAMLToJSON] is typically called after [BytesToYAMLDoc]. 
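+//
+// Typical flow (illustrative sketch, assuming a well-formed YAML object):
+//
+//	doc, err := BytesToYAMLDoc([]byte("a: 1\nb: two"))
+//	if err != nil { /* handle error */ }
+//	raw, err := YAMLToJSON(doc) // raw holds json.RawMessage(`{"a":1,"b":"two"}`)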
+func YAMLToJSON(value any) (json.RawMessage, error) { jm, err := transformData(value) if err != nil { return nil, err @@ -42,7 +33,7 @@ func YAMLToJSON(value interface{}) (json.RawMessage, error) { // This function only supports root documents that are objects. // // A YAML document is a pointer to a [yaml.Node]. -func BytesToYAMLDoc(data []byte) (interface{}, error) { +func BytesToYAMLDoc(data []byte) (any, error) { var document yaml.Node // preserve order that is present in the document if err := yaml.Unmarshal(data, &document); err != nil { return nil, err @@ -53,7 +44,7 @@ func BytesToYAMLDoc(data []byte) (interface{}, error) { return &document, nil } -func yamlNode(root *yaml.Node) (interface{}, error) { +func yamlNode(root *yaml.Node) (any, error) { switch root.Kind { case yaml.DocumentNode: return yamlDocument(root) @@ -70,41 +61,28 @@ func yamlNode(root *yaml.Node) (interface{}, error) { } } -func yamlDocument(node *yaml.Node) (interface{}, error) { +func yamlDocument(node *yaml.Node) (any, error) { if len(node.Content) != 1 { return nil, fmt.Errorf("unexpected YAML Document node content length: %d: %w", len(node.Content), ErrYAML) } return yamlNode(node.Content[0]) } -func yamlMapping(node *yaml.Node) (interface{}, error) { - const sensibleAllocDivider = 2 +func yamlMapping(node *yaml.Node) (any, error) { + const sensibleAllocDivider = 2 // nodes concatenate (key,value) sequences m := make(YAMLMapSlice, len(node.Content)/sensibleAllocDivider) - var j int - for i := 0; i < len(node.Content); i += 2 { - var nmi YAMLMapItem - k, err := yamlStringScalarC(node.Content[i]) - if err != nil { - return nil, fmt.Errorf("unable to decode YAML map key: %w: %w", err, ErrYAML) - } - nmi.Key = k - v, err := yamlNode(node.Content[i+1]) - if err != nil { - return nil, fmt.Errorf("unable to process YAML map value for key %q: %w: %w", k, err, ErrYAML) - } - nmi.Value = v - m[j] = nmi - j++ + if err := m.UnmarshalYAML(node); err != nil { + return nil, err } + return m, nil } -func yamlSequence(node *yaml.Node) (interface{}, error) { - s := make([]interface{}, 0) - - for i := 0; i < len(node.Content); i++ { +func yamlSequence(node *yaml.Node) (any, error) { + s := make([]any, 0) + for i := range len(node.Content) { v, err := yamlNode(node.Content[i]) if err != nil { return nil, fmt.Errorf("unable to decode YAML sequence value: %w: %w", err, ErrYAML) @@ -123,7 +101,7 @@ const ( // See https://yaml.org/type/ yamlNull = "tag:yaml.org,2002:null" ) -func yamlScalar(node *yaml.Node) (interface{}, error) { +func yamlScalar(node *yaml.Node) (any, error) { switch node.LongTag() { case yamlStringScalar: return node.Value, nil @@ -167,42 +145,42 @@ func yamlStringScalarC(node *yaml.Node) (string, error) { } } -func transformData(input interface{}) (out interface{}, err error) { - format := func(t interface{}) (string, error) { - switch k := t.(type) { - case string: - return k, nil - case uint: - return strconv.FormatUint(uint64(k), 10), nil - case uint8: - return strconv.FormatUint(uint64(k), 10), nil - case uint16: - return strconv.FormatUint(uint64(k), 10), nil - case uint32: - return strconv.FormatUint(uint64(k), 10), nil - case uint64: - return strconv.FormatUint(k, 10), nil - case int: - return strconv.Itoa(k), nil - case int8: - return strconv.FormatInt(int64(k), 10), nil - case int16: - return strconv.FormatInt(int64(k), 10), nil - case int32: - return strconv.FormatInt(int64(k), 10), nil - case int64: - return strconv.FormatInt(k, 10), nil - default: - return "", fmt.Errorf("unexpected map key type, 
got: %T: %w", k, ErrYAML) - } +func format(t any) (string, error) { + switch k := t.(type) { + case string: + return k, nil + case uint: + return strconv.FormatUint(uint64(k), 10), nil + case uint8: + return strconv.FormatUint(uint64(k), 10), nil + case uint16: + return strconv.FormatUint(uint64(k), 10), nil + case uint32: + return strconv.FormatUint(uint64(k), 10), nil + case uint64: + return strconv.FormatUint(k, 10), nil + case int: + return strconv.Itoa(k), nil + case int8: + return strconv.FormatInt(int64(k), 10), nil + case int16: + return strconv.FormatInt(int64(k), 10), nil + case int32: + return strconv.FormatInt(int64(k), 10), nil + case int64: + return strconv.FormatInt(k, 10), nil + default: + return "", fmt.Errorf("unexpected map key type, got: %T: %w", k, ErrYAML) } +} +func transformData(input any) (out any, err error) { switch in := input.(type) { case yaml.Node: return yamlNode(&in) case *yaml.Node: return yamlNode(in) - case map[interface{}]interface{}: + case map[any]any: o := make(YAMLMapSlice, 0, len(in)) for ke, va := range in { var nmi YAMLMapItem @@ -218,10 +196,10 @@ func transformData(input interface{}) (out interface{}, err error) { o = append(o, nmi) } return o, nil - case []interface{}: + case []any: len1 := len(in) - o := make([]interface{}, len1) - for i := 0; i < len1; i++ { + o := make([]any, len1) + for i := range len1 { o[i], err = transformData(in[i]) if err != nil { return nil, err diff --git a/vendor/github.com/go-openapi/swag/yamlutils_iface.go b/vendor/github.com/go-openapi/swag/yamlutils_iface.go index 49e646486..57767efc5 100644 --- a/vendor/github.com/go-openapi/swag/yamlutils_iface.go +++ b/vendor/github.com/go-openapi/swag/yamlutils_iface.go @@ -1,16 +1,5 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// SPDX-FileCopyrightText: Copyright 2015-2025 go-swagger maintainers +// SPDX-License-Identifier: Apache-2.0 package swag @@ -23,9 +12,9 @@ import ( // YAMLToJSON converts YAML unmarshaled data into json compatible data // // Deprecated: use [yamlutils.YAMLToJSON] instead. -func YAMLToJSON(data interface{}) (json.RawMessage, error) { return yamlutils.YAMLToJSON(data) } +func YAMLToJSON(data any) (json.RawMessage, error) { return yamlutils.YAMLToJSON(data) } // BytesToYAMLDoc converts a byte slice into a YAML document // // Deprecated: use [yamlutils.BytesToYAMLDoc] instead. 
-func BytesToYAMLDoc(data []byte) (interface{}, error) { return yamlutils.BytesToYAMLDoc(data) } +func BytesToYAMLDoc(data []byte) (any, error) { return yamlutils.BytesToYAMLDoc(data) } diff --git a/vendor/github.com/gofrs/uuid/.gitignore b/vendor/github.com/gofrs/uuid/.gitignore new file mode 100644 index 000000000..666dbbb5b --- /dev/null +++ b/vendor/github.com/gofrs/uuid/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# binary bundle generated by go-fuzz +uuid-fuzz.zip diff --git a/vendor/github.com/gofrs/uuid/LICENSE b/vendor/github.com/gofrs/uuid/LICENSE new file mode 100644 index 000000000..926d54987 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013-2018 by Maxim Bublis + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md new file mode 100644 index 000000000..f5db14f07 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/README.md @@ -0,0 +1,117 @@ +# UUID + +[![License](https://img.shields.io/github/license/gofrs/uuid.svg)](https://github.com/gofrs/uuid/blob/master/LICENSE) +[![Build Status](https://travis-ci.org/gofrs/uuid.svg?branch=master)](https://travis-ci.org/gofrs/uuid) +[![GoDoc](http://godoc.org/github.com/gofrs/uuid?status.svg)](http://godoc.org/github.com/gofrs/uuid) +[![Coverage Status](https://codecov.io/gh/gofrs/uuid/branch/master/graphs/badge.svg?branch=master)](https://codecov.io/gh/gofrs/uuid/) +[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/uuid)](https://goreportcard.com/report/github.com/gofrs/uuid) + +Package uuid provides a pure Go implementation of Universally Unique Identifiers +(UUID) variant as defined in RFC-4122. This package supports both the creation +and parsing of UUIDs in different formats. 
+ +This package supports the following UUID versions: +* Version 1, based on timestamp and MAC address (RFC-4122) +* Version 3, based on MD5 hashing of a named value (RFC-4122) +* Version 4, based on random numbers (RFC-4122) +* Version 5, based on SHA-1 hashing of a named value (RFC-4122) + +This package also supports experimental Universally Unique Identifier implementations based on a +[draft RFC](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03) that updates RFC-4122 +* Version 6, a k-sortable id based on timestamp, and field-compatible with v1 (draft-peabody-dispatch-new-uuid-format, RFC-4122) +* Version 7, a k-sortable id based on timestamp (draft-peabody-dispatch-new-uuid-format, RFC-4122) + +The v6 and v7 IDs are **not** considered a part of the stable API, and may be subject to behavior or API changes as part of minor releases +to this package. They will be updated as the draft RFC changes, and will become stable if and when the draft RFC is accepted. + +## Project History + +This project was originally forked from the +[github.com/satori/go.uuid](https://github.com/satori/go.uuid) repository after +it appeared to be no longer maintained, while exhibiting [critical +flaws](https://github.com/satori/go.uuid/issues/73). We have decided to take +over this project to ensure it receives regular maintenance for the benefit of +the larger Go community. + +We'd like to thank Maxim Bublis for his hard work on the original iteration of +the package. + +## License + +This source code of this package is released under the MIT License. Please see +the [LICENSE](https://github.com/gofrs/uuid/blob/master/LICENSE) for the full +content of the license. + +## Recommended Package Version + +We recommend using v2.0.0+ of this package, as versions prior to 2.0.0 were +created before our fork of the original package and have some known +deficiencies. + +## Installation + +It is recommended to use a package manager like `dep` that understands tagged +releases of a package, as well as semantic versioning. + +If you are unable to make use of a dependency manager with your project, you can +use the `go get` command to download it directly: + +```Shell +$ go get github.com/gofrs/uuid +``` + +## Requirements + +Due to subtests not being supported in older versions of Go, this package is +only regularly tested against Go 1.7+. This package may work perfectly fine with +Go 1.2+, but support for these older versions is not actively maintained. + +## Go 1.11 Modules + +As of v3.2.0, this repository no longer adopts Go modules, and v3.2.0 no longer has a `go.mod` file. As a result, v3.2.0 also drops support for the `github.com/gofrs/uuid/v3` import path. Only module-based consumers are impacted. With the v3.2.0 release, _all_ gofrs/uuid consumers should use the `github.com/gofrs/uuid` import path. + +An existing module-based consumer will continue to be able to build using the `github.com/gofrs/uuid/v3` import path using any valid consumer `go.mod` that worked prior to the publishing of v3.2.0, but any module-based consumer should start using the `github.com/gofrs/uuid` import path when possible and _must_ use the `github.com/gofrs/uuid` import path prior to upgrading to v3.2.0. + +Please refer to [Issue #61](https://github.com/gofrs/uuid/issues/61) and [Issue #66](https://github.com/gofrs/uuid/issues/66) for more details. + +## Usage + +Here is a quick overview of how to use this package. 
For more detailed +documentation, please see the [GoDoc Page](http://godoc.org/github.com/gofrs/uuid). + +```go +package main + +import ( + "log" + + "github.com/gofrs/uuid" +) + +// Create a Version 4 UUID, panicking on error. +// Use this form to initialize package-level variables. +var u1 = uuid.Must(uuid.NewV4()) + +func main() { + // Create a Version 4 UUID. + u2, err := uuid.NewV4() + if err != nil { + log.Fatalf("failed to generate UUID: %v", err) + } + log.Printf("generated Version 4 UUID %v", u2) + + // Parse a UUID from a string. + s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + u3, err := uuid.FromString(s) + if err != nil { + log.Fatalf("failed to parse UUID %q: %v", s, err) + } + log.Printf("successfully parsed UUID %v", u3) +} +``` + +## References + +* [RFC-4122](https://tools.ietf.org/html/rfc4122) +* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) +* [New UUID Formats RFC Draft (Peabody) Rev 03](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03) diff --git a/vendor/github.com/gofrs/uuid/codec.go b/vendor/github.com/gofrs/uuid/codec.go new file mode 100644 index 000000000..e3014c68c --- /dev/null +++ b/vendor/github.com/gofrs/uuid/codec.go @@ -0,0 +1,212 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "encoding/hex" + "fmt" +) + +// FromBytes returns a UUID generated from the raw byte slice input. +// It will return an error if the slice isn't 16 bytes long. +func FromBytes(input []byte) (UUID, error) { + u := UUID{} + err := u.UnmarshalBinary(input) + return u, err +} + +// FromBytesOrNil returns a UUID generated from the raw byte slice input. +// Same behavior as FromBytes(), but returns uuid.Nil instead of an error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns a UUID parsed from the input string. +// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (UUID, error) { + u := UUID{} + err := u.UnmarshalText([]byte(input)) + return u, err +} + +// FromStringOrNil returns a UUID parsed from the input string. +// Same behavior as FromString(), but returns uuid.Nil instead of an error. 
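+//
+// Illustrative (example added for clarity):
+//
+//	u := FromStringOrNil("6ba7b810-9dad-11d1-80b4-00c04fd430c8") // valid input
+//	bad := FromStringOrNil("not-a-uuid")                         // bad == Nil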
+func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by the String() method. +func (u UUID) MarshalText() ([]byte, error) { + return []byte(u.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Following formats are supported: +// +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// "{6ba7b8109dad11d180b400c04fd430c8}", +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" +// +// ABNF for supported UUID text representation follows: +// +// URN := 'urn' +// UUID-NID := 'uuid' +// +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +// +// hexoct := hexdig hexdig +// 2hexoct := hexoct hexoct +// 4hexoct := 2hexoct 2hexoct +// 6hexoct := 4hexoct 2hexoct +// 12hexoct := 6hexoct 6hexoct +// +// hashlike := 12hexoct +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// +// plain := canonical | hashlike +// uuid := canonical | hashlike | braced | urn +// +// braced := '{' plain '}' | '{' hashlike '}' +// urn := URN ':' UUID-NID ':' plain +// +func (u *UUID) UnmarshalText(text []byte) error { + switch len(text) { + case 32: + return u.decodeHashLike(text) + case 34, 38: + return u.decodeBraced(text) + case 36: + return u.decodeCanonical(text) + case 41, 45: + return u.decodeURN(text) + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(text), text) + } +} + +// decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3): +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". +func (u *UUID) decodeCanonical(t []byte) error { + if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + src := t + dst := u[:] + + for i, byteGroup := range byteGroups { + if i > 0 { + src = src[1:] // skip dash + } + _, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup]) + if err != nil { + return err + } + src = src[byteGroup:] + dst = dst[byteGroup/2:] + } + + return nil +} + +// decodeHashLike decodes UUID strings that are using the following format: +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeHashLike(t []byte) error { + src := t[:] + dst := u[:] + + _, err := hex.Decode(dst, src) + return err +} + +// decodeBraced decodes UUID strings that are using the following formats: +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" +// "{6ba7b8109dad11d180b400c04fd430c8}". +func (u *UUID) decodeBraced(t []byte) error { + l := len(t) + + if t[0] != '{' || t[l-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + return u.decodePlain(t[1 : l-1]) +} + +// decodeURN decodes UUID strings that are using the following formats: +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". 
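+// The nine bytes checked below are expected to spell the "urn:uuid:" prefix
+// (held in urnPrefix); the remainder is decoded as a plain or hash-like UUID.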
+func (u *UUID) decodeURN(t []byte) error { + total := len(t) + + urnUUIDPrefix := t[:9] + + if !bytes.Equal(urnUUIDPrefix, urnPrefix) { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + return u.decodePlain(t[9:total]) +} + +// decodePlain decodes UUID strings that are using the following formats: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodePlain(t []byte) error { + switch len(t) { + case 32: + return u.decodeHashLike(t) + case 36: + return u.decodeCanonical(t) + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(t), t) + } +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (u UUID) MarshalBinary() ([]byte, error) { + return u.Bytes(), nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return an error if the slice isn't 16 bytes long. +func (u *UUID) UnmarshalBinary(data []byte) error { + if len(data) != Size { + return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + } + copy(u[:], data) + + return nil +} diff --git a/vendor/github.com/gofrs/uuid/fuzz.go b/vendor/github.com/gofrs/uuid/fuzz.go new file mode 100644 index 000000000..afaefbc8e --- /dev/null +++ b/vendor/github.com/gofrs/uuid/fuzz.go @@ -0,0 +1,47 @@ +// Copyright (c) 2018 Andrei Tudor Călin +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// +build gofuzz + +package uuid + +// Fuzz implements a simple fuzz test for FromString / UnmarshalText. +// +// To run: +// +// $ go get github.com/dvyukov/go-fuzz/... +// $ cd $GOPATH/src/github.com/gofrs/uuid +// $ go-fuzz-build github.com/gofrs/uuid +// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata +// +// If you make significant changes to FromString / UnmarshalText and add +// new cases to fromStringTests (in codec_test.go), please run +// +// $ go test -seed_fuzz_corpus +// +// to seed the corpus with the new interesting inputs, then run the fuzzer. 
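+//
+// The return value follows go-fuzz conventions: 1 marks the input as
+// interesting (it parsed as a UUID), 0 means it was rejected.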
+func Fuzz(data []byte) int { + _, err := FromString(string(data)) + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go new file mode 100644 index 000000000..4550bc6b3 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/generator.go @@ -0,0 +1,356 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "fmt" + "hash" + "io" + "net" + "sync" + "time" +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +type epochFunc func() time.Time + +// HWAddrFunc is the function type used to provide hardware (MAC) addresses. +type HWAddrFunc func() (net.HardwareAddr, error) + +// DefaultGenerator is the default UUID Generator used by this package. +var DefaultGenerator Generator = NewGen() + +// NewV1 returns a UUID based on the current timestamp and MAC address. +func NewV1() (UUID, error) { + return DefaultGenerator.NewV1() } + +// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + return DefaultGenerator.NewV3(ns, name) +} + +// NewV4 returns a randomly generated UUID. +func NewV4() (UUID, error) { + return DefaultGenerator.NewV4() +} + +// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + return DefaultGenerator.NewV5(ns, name) +} + +// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of +// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit +// order being adjusted to allow the UUID to be k-sortable. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as minor version +// releases until the spec is final. +func NewV6() (UUID, error) { + return DefaultGenerator.NewV6() +} + +// NewV7 returns a k-sortable UUID based on the current millisecond precision +// UNIX epoch and 74 bits of pseudorandom data.
+// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as minor version +// releases until the spec is final. +func NewV7() (UUID, error) { + return DefaultGenerator.NewV7() +} + +// Generator provides an interface for generating UUIDs. +type Generator interface { + NewV1() (UUID, error) + NewV3(ns UUID, name string) UUID + NewV4() (UUID, error) + NewV5(ns UUID, name string) UUID + NewV6() (UUID, error) + NewV7() (UUID, error) +} + +// Gen is a reference UUID generator based on the specifications laid out in +// RFC-4122 and DCE 1.1: Authentication and Security Services. This type +// satisfies the Generator interface as defined in this package. +// +// For consumers who are generating V1 UUIDs, but don't want to expose the MAC +// address of the node generating the UUIDs, the NewGenWithHWAF() function has been +// provided as a convenience. See the function's documentation for more info. +// +// The authors of this package do not feel that the majority of users will need +// to obfuscate their MAC address, and so we recommend using NewGen() to create +// a new generator. +type Gen struct { + clockSequenceOnce sync.Once + hardwareAddrOnce sync.Once + storageMutex sync.Mutex + + rand io.Reader + + epochFunc epochFunc + hwAddrFunc HWAddrFunc + lastTime uint64 + clockSequence uint16 + hardwareAddr [6]byte +} + +// interface check -- build will fail if *Gen doesn't satisfy Generator +var _ Generator = (*Gen)(nil) + +// NewGen returns a new instance of Gen with some default values set. Most +// people should use this. +func NewGen() *Gen { + return NewGenWithHWAF(defaultHWAddrFunc) +} + +// NewGenWithHWAF builds a new UUID generator with the HWAddrFunc provided. Most +// consumers should use NewGen() instead. +// +// This is used so that consumers can generate their own MAC addresses, for use +// in the generated UUIDs, if there is some concern about exposing the physical +// address of the machine generating the UUID. +// +// The Gen generator will only invoke the HWAddrFunc once, and cache that MAC +// address for all the future UUIDs generated by it. If you'd like to switch the +// MAC address being used, you'll need to create a new generator using this +// function. +func NewGenWithHWAF(hwaf HWAddrFunc) *Gen { + return &Gen{ + epochFunc: time.Now, + hwAddrFunc: hwaf, + rand: rand.Reader, + } +} + +// NewV1 returns a UUID based on the current timestamp and MAC address. +func (g *Gen) NewV1() (UUID, error) { + u := UUID{} + + timeNow, clockSeq, err := g.getClockSequence() + if err != nil { + return Nil, err + } + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + hardwareAddr, err := g.getHardwareAddr() + if err != nil { + return Nil, err + } + copy(u[10:], hardwareAddr) + + u.SetVersion(V1) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. +func (g *Gen) NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(V3) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV4 returns a randomly generated UUID.
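
Before the NewV4 implementation below, an editorial sketch (not part of the vendored file) of what NewGenWithHWAF is for: supplying a fixed, locally administered MAC so that V1 UUIDs never embed the host's real hardware address. The address bytes here are arbitrary example values:

```go
package main

import (
	"fmt"
	"net"

	"github.com/gofrs/uuid"
)

func main() {
	gen := uuid.NewGenWithHWAF(func() (net.HardwareAddr, error) {
		// Locally administered address; never collides with a burned-in MAC.
		return net.HardwareAddr{0x02, 0x00, 0x5e, 0x10, 0x00, 0x01}, nil
	})

	u, err := gen.NewV1()
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // node field (the last 12 hex digits) is 02005e100001
}
```

Note that the generator caches the address after the first call, as documented above, so the function is invoked exactly once.
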
+func (g *Gen) NewV4() (UUID, error) { + u := UUID{} + if _, err := io.ReadFull(g.rand, u[:]); err != nil { + return Nil, err + } + u.SetVersion(V4) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. +func (g *Gen) NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(V5) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of +// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit +// order being adjusted to allow the UUID to be k-sortable. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as minor version +// releases until the spec is final. +func (g *Gen) NewV6() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[10:]); err != nil { + return Nil, err + } + + timeNow, clockSeq, err := g.getClockSequence() + if err != nil { + return Nil, err + } + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow>>28)) // set time_high + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>12)) // set time_mid + binary.BigEndian.PutUint16(u[6:], uint16(timeNow&0xfff)) // set time_low (minus four version bits) + binary.BigEndian.PutUint16(u[8:], clockSeq&0x3fff) // set clk_seq_hi_res (minus two variant bits) + + u.SetVersion(V6) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// getClockSequence returns the epoch and clock sequence for V1 and V6 UUIDs. +func (g *Gen) getClockSequence() (uint64, uint16, error) { + var err error + g.clockSequenceOnce.Do(func() { + buf := make([]byte, 2) + if _, err = io.ReadFull(g.rand, buf); err != nil { + return + } + g.clockSequence = binary.BigEndian.Uint16(buf) + }) + if err != nil { + return 0, 0, err + } + + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + timeNow := g.getEpoch() + // Clock didn't change since last UUID generation. + // Should increase clock sequence. + if timeNow <= g.lastTime { + g.clockSequence++ + } + g.lastTime = timeNow + + return timeNow, g.clockSequence, nil +} + +// NewV7 returns a k-sortable UUID based on the current millisecond precision +// UNIX epoch and 74 bits of pseudorandom data. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as minor version +// releases until the spec is final. +func (g *Gen) NewV7() (UUID, error) { + var u UUID + + if _, err := io.ReadFull(g.rand, u[6:]); err != nil { + return Nil, err + } + + tn := g.epochFunc() + ms := uint64(tn.Unix())*1e3 + uint64(tn.Nanosecond())/1e6 + u[0] = byte(ms >> 40) + u[1] = byte(ms >> 32) + u[2] = byte(ms >> 24) + u[3] = byte(ms >> 16) + u[4] = byte(ms >> 8) + u[5] = byte(ms) + + u.SetVersion(V7) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// Returns the hardware address.
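
To make the V7 layout concrete before the hardware-address helper resumes below, the 48-bit big-endian millisecond timestamp written by NewV7 can be read back out of the first six bytes. An editorial sketch, not part of the vendored file:

```go
package main

import (
	"fmt"
	"time"

	"github.com/gofrs/uuid"
)

func main() {
	u, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}

	// Reassemble the millisecond count packed into u[0..5] by Gen.NewV7;
	// the remaining bits (minus version and variant) are pseudorandom.
	ms := uint64(u[0])<<40 | uint64(u[1])<<32 | uint64(u[2])<<24 |
		uint64(u[3])<<16 | uint64(u[4])<<8 | uint64(u[5])
	fmt.Println(time.UnixMilli(int64(ms))) // approximately time.Now()
}
```
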
+func (g *Gen) getHardwareAddr() ([]byte, error) { + var err error + g.hardwareAddrOnce.Do(func() { + var hwAddr net.HardwareAddr + if hwAddr, err = g.hwAddrFunc(); err == nil { + copy(g.hardwareAddr[:], hwAddr) + return + } + + // Initialize hardwareAddr randomly in case + // no real network interface is present. + if _, err = io.ReadFull(g.rand, g.hardwareAddr[:]); err != nil { + return + } + // Set multicast bit as recommended by RFC-4122 + g.hardwareAddr[0] |= 0x01 + }) + if err != nil { + return []byte{}, err + } + return g.hardwareAddr[:], nil +} + +// Returns the difference between UUID epoch (October 15, 1582) +// and current time in 100-nanosecond intervals. +func (g *Gen) getEpoch() uint64 { + return epochStart + uint64(g.epochFunc().UnixNano()/100) +} + +// Returns the UUID based on the hashing of the namespace UUID and name. +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +var netInterfaces = net.Interfaces + +// Returns the hardware address. +func defaultHWAddrFunc() (net.HardwareAddr, error) { + ifaces, err := netInterfaces() + if err != nil { + return []byte{}, err + } + for _, iface := range ifaces { + if len(iface.HardwareAddr) >= 6 { + return iface.HardwareAddr, nil + } + } + return []byte{}, fmt.Errorf("uuid: no HW address found") +} diff --git a/vendor/github.com/gofrs/uuid/sql.go b/vendor/github.com/gofrs/uuid/sql.go new file mode 100644 index 000000000..6f254a4fd --- /dev/null +++ b/vendor/github.com/gofrs/uuid/sql.go @@ -0,0 +1,109 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice will be handled by UnmarshalBinary, while +// a longer byte slice or a string will be handled by UnmarshalText.
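
Before the Scan implementation below, an editorial sketch of how Scan and NullUUID are typically driven from database/sql. The table, column, and placeholder style are invented for the example; only the uuid API is vendored:

```go
package example

import (
	"database/sql"

	"github.com/gofrs/uuid"
)

// lookupOwner reads a UUID column that may be NULL. NullUUID.Scan accepts
// NULL, 16-byte values, and the textual forms handled by UnmarshalText.
func lookupOwner(db *sql.DB, id int) (uuid.UUID, bool, error) {
	var owner uuid.NullUUID
	err := db.QueryRow(`SELECT owner_uuid FROM items WHERE id = $1`, id).Scan(&owner)
	if err != nil {
		return uuid.Nil, false, err
	}
	return owner.UUID, owner.Valid, nil
}
```
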
+func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case UUID: // support gorm convert from UUID to NullUUID + *u = src + return nil + + case []byte: + if len(src) == Size { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database. +type NullUUID struct { + UUID UUID + Valid bool +} + +// Value implements the driver.Valuer interface. +func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. +func (u *NullUUID) Scan(src interface{}) error { + if src == nil { + u.UUID, u.Valid = Nil, false + return nil + } + + // Delegate to UUID Scan function + u.Valid = true + return u.UUID.Scan(src) +} + +// MarshalJSON marshals the NullUUID as null or the nested UUID +func (u NullUUID) MarshalJSON() ([]byte, error) { + if !u.Valid { + return json.Marshal(nil) + } + + return json.Marshal(u.UUID) +} + +// UnmarshalJSON unmarshals a NullUUID +func (u *NullUUID) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, []byte("null")) { + u.UUID, u.Valid = Nil, false + return nil + } + + if err := json.Unmarshal(b, &u.UUID); err != nil { + return err + } + + u.Valid = true + + return nil +} diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go new file mode 100644 index 000000000..e747e5412 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/uuid.go @@ -0,0 +1,292 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package uuid provides implementations of the Universally Unique Identifier +// (UUID), as specified in RFC-4122 and the Peabody RFC Draft (revision 03). +// +// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. The +// Peabody UUID RFC Draft[2] provides the specification for the new k-sortable +// UUIDs, versions 6 and 7. +// +// DCE 1.1[3] provides the specification for version 2, but version 2 support +// was removed from this package in v4 due to some concerns with the +// specification itself. Reading the spec, it seems that it would result in +// generating UUIDs that aren't very unique. 
Having read the spec, it also seemed +// that our implementation did not meet it. It also seems to be at-odds +// with RFC 4122, meaning we would need quite a bit of special code to support +// it. Lastly, there were no Version 2 implementations that we could find to +// ensure we were understanding the specification correctly. +// +// [1] https://tools.ietf.org/html/rfc4122 +// [2] https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03 +// [3] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 +package uuid + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "strings" + "time" +) + +// Size of a UUID in bytes. +const Size = 16 + +// UUID is an array type to represent the value of a UUID, as defined in RFC-4122. +type UUID [Size]byte + +// UUID versions. +const ( + _ byte = iota + V1 // Version 1 (date-time and MAC address) + _ // Version 2 (date-time and MAC address, DCE security version) [removed] + V3 // Version 3 (namespace name-based) + V4 // Version 4 (random) + V5 // Version 5 (namespace name-based) + V6 // Version 6 (k-sortable timestamp and random data, field-compatible with v1) [peabody draft] + V7 // Version 7 (k-sortable timestamp and random data) [peabody draft] + _ // Version 8 (k-sortable timestamp, meant for custom implementations) [peabody draft] [not implemented] +) + +// UUID layout variants. +const ( + VariantNCS byte = iota + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// UUID DCE domains. +const ( + DomainPerson = iota + DomainGroup + DomainOrg +) + +// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00, +// 15 October 1582 within a V1 UUID. This type has no meaning for other +// UUID versions since they don't have an embedded timestamp. +type Timestamp uint64 + +const _100nsPerSecond = 10000000 + +// Time returns the UTC time.Time representation of a Timestamp. +func (t Timestamp) Time() (time.Time, error) { + secs := uint64(t) / _100nsPerSecond + nsecs := 100 * (uint64(t) % _100nsPerSecond) + + return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil +} + +// TimestampFromV1 returns the Timestamp embedded within a V1 UUID. +// Returns an error if the UUID is any version other than 1. +func TimestampFromV1(u UUID) (Timestamp, error) { + if u.Version() != 1 { + err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version()) + return 0, err + } + + low := binary.BigEndian.Uint32(u[0:4]) + mid := binary.BigEndian.Uint16(u[4:6]) + hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff + + return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil +} + +// TimestampFromV6 returns the Timestamp embedded within a V6 UUID. This +// function returns an error if the UUID is any version other than 6. +// +// This is implemented based on revision 03 of the Peabody UUID draft, and may +// be subject to change pending further revisions. Until the final specification +// revision is finished, changes required to implement updates to the spec will +// not be considered a breaking change. They will happen as minor version +// releases until the spec is final.
+func TimestampFromV6(u UUID) (Timestamp, error) { + if u.Version() != 6 { + return 0, fmt.Errorf("uuid: %s is version %d, not version 6", u, u.Version()) + } + + hi := binary.BigEndian.Uint32(u[0:4]) + mid := binary.BigEndian.Uint16(u[4:6]) + low := binary.BigEndian.Uint16(u[6:8]) & 0xfff + + return Timestamp(uint64(low) + (uint64(mid) << 12) + (uint64(hi) << 28)), nil +} + +// String parse helpers. +var ( + urnPrefix = []byte("urn:uuid:") + byteGroups = []int{8, 4, 4, 4, 12} +) + +// Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to +// zero. +var Nil = UUID{} + +// Predefined namespace UUIDs. +var ( + NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) +) + +// IsNil returns if the UUID is equal to the nil UUID +func (u UUID) IsNil() bool { + return u == Nil +} + +// Version returns the algorithm version used to generate the UUID. +func (u UUID) Version() byte { + return u[6] >> 4 +} + +// Variant returns the UUID layout variant. +func (u UUID) Variant() byte { + switch { + case (u[8] >> 7) == 0x00: + return VariantNCS + case (u[8] >> 6) == 0x02: + return VariantRFC4122 + case (u[8] >> 5) == 0x06: + return VariantMicrosoft + case (u[8] >> 5) == 0x07: + fallthrough + default: + return VariantFuture + } +} + +// Bytes returns a byte slice representation of the UUID. +func (u UUID) Bytes() []byte { + return u[:] +} + +// String returns a canonical RFC-4122 string representation of the UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// Format implements fmt.Formatter for UUID values. +// +// The behavior is as follows: +// The 'x' and 'X' verbs output only the hex digits of the UUID, using a-f for 'x' and A-F for 'X'. +// The 'v', '+v', 's' and 'q' verbs return the canonical RFC-4122 string representation. +// The 'S' verb returns the RFC-4122 format, but with capital hex digits. +// The '#v' verb returns the "Go syntax" representation, which is a 16 byte array initializer. +// All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return +// "%!verb(uuid.UUID=value)" as recommended by the fmt package. 
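
The verbs documented above are easiest to see side by side. An editorial sketch, not part of the vendored file; the Format implementation follows:

```go
package main

import (
	"fmt"

	"github.com/gofrs/uuid"
)

func main() {
	u := uuid.Must(uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
	fmt.Printf("%s\n", u) // 6ba7b810-9dad-11d1-80b4-00c04fd430c8
	fmt.Printf("%S\n", u) // 6BA7B810-9DAD-11D1-80B4-00C04FD430C8
	fmt.Printf("%x\n", u) // 6ba7b8109dad11d180b400c04fd430c8
	fmt.Printf("%X\n", u) // 6BA7B8109DAD11D180B400C04FD430C8
	fmt.Printf("%q\n", u) // "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
}
```
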
+func (u UUID) Format(f fmt.State, c rune) { + switch c { + case 'x', 'X': + s := hex.EncodeToString(u.Bytes()) + if c == 'X' { + s = strings.Map(toCapitalHexDigits, s) + } + _, _ = io.WriteString(f, s) + case 'v': + var s string + if f.Flag('#') { + s = fmt.Sprintf("%#v", [Size]byte(u)) + } else { + s = u.String() + } + _, _ = io.WriteString(f, s) + case 's', 'S': + s := u.String() + if c == 'S' { + s = strings.Map(toCapitalHexDigits, s) + } + _, _ = io.WriteString(f, s) + case 'q': + _, _ = io.WriteString(f, `"`+u.String()+`"`) + default: + // invalid/unsupported format verb + fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String()) + } +} + +func toCapitalHexDigits(ch rune) rune { + // convert a-f hex digits to A-F + switch ch { + case 'a': + return 'A' + case 'b': + return 'B' + case 'c': + return 'C' + case 'd': + return 'D' + case 'e': + return 'E' + case 'f': + return 'F' + default: + return ch + } +} + +// SetVersion sets the version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets the variant bits. +func (u *UUID) SetVariant(v byte) { + switch v { + case VariantNCS: + u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) + case VariantRFC4122: + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + case VariantMicrosoft: + u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) + case VariantFuture: + fallthrough + default: + u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) + } +} + +// Must is a helper that wraps a call to a function returning (UUID, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) +func Must(u UUID, err error) UUID { + if err != nil { + panic(err) + } + return u +} diff --git a/vendor/github.com/google/gnostic-models/extensions/extension.proto b/vendor/github.com/google/gnostic-models/extensions/extension.proto index 875137c1a..a60042989 100644 --- a/vendor/github.com/google/gnostic-models/extensions/extension.proto +++ b/vendor/github.com/google/gnostic-models/extensions/extension.proto @@ -42,7 +42,7 @@ option java_package = "org.gnostic.v1"; option objc_class_prefix = "GNX"; // The Go package name. -option go_package = "./extensions;gnostic_extension_v1"; +option go_package = "github.com/google/gnostic-models/extensions;gnostic_extension_v1"; // The version number of Gnostic. message Version { diff --git a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto index 1c59b2f4a..49adafcc8 100644 --- a/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto +++ b/vendor/github.com/google/gnostic-models/openapiv2/OpenAPIv2.proto @@ -42,7 +42,7 @@ option java_package = "org.openapi_v2"; option objc_class_prefix = "OAS"; // The Go package name. -option go_package = "./openapiv2;openapi_v2"; +option go_package = "github.com/google/gnostic-models/openapiv2;openapi_v2"; message AdditionalPropertiesItem { oneof oneof { diff --git a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto index 1be335b89..af4b6254b 100644 --- a/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto +++ b/vendor/github.com/google/gnostic-models/openapiv3/OpenAPIv3.proto @@ -42,7 +42,7 @@ option java_package = "org.openapi_v3"; option objc_class_prefix = "OAS"; // The Go package name. 
-option go_package = "./openapiv3;openapi_v3"; +option go_package = "github.com/google/gnostic-models/openapiv3;openapi_v3"; message AdditionalPropertiesItem { oneof oneof { diff --git a/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto index 09ee0aac5..895b4567c 100644 --- a/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto +++ b/vendor/github.com/google/gnostic-models/openapiv3/annotations.proto @@ -20,7 +20,7 @@ import "google/protobuf/descriptor.proto"; import "openapiv3/OpenAPIv3.proto"; // The Go package name. -option go_package = "./openapiv3;openapi_v3"; +option go_package = "github.com/google/gnostic-models/openapiv3;openapi_v3"; // This option lets the proto compiler generate Java code inside the package // name (see below) instead of inside an outer class. It creates a simpler // developer experience by reducing one-level of name nesting and be diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md new file mode 100644 index 000000000..86c6d03fb --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md @@ -0,0 +1,23 @@ +# UNRELEASED + +# 1.3.0 (September 17th, 2020) + +FEATURES + +* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)] + +# 1.2.0 (March 18th, 2020) + +FEATURES + +* Adds a `Clone` method to `Txn` allowing transactions to be split either into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)] + +# 1.1.0 (May 22nd, 2019) + +FEATURES + +* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)] + +# 1.0.0 (August 30th, 2018) + +* go mod adopted diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. 
that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md new file mode 100644 index 000000000..aca15a642 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md @@ -0,0 +1,66 @@ +go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + +Here is an example of performing a range scan of the keys. 
+ +```go +// Create a tree +r := iradix.New() +r, _, _ = r.Insert([]byte("001"), 1) +r, _, _ = r.Insert([]byte("002"), 2) +r, _, _ = r.Insert([]byte("005"), 5) +r, _, _ = r.Insert([]byte("010"), 10) +r, _, _ = r.Insert([]byte("100"), 10) + +// Range scan over the keys that sort lexicographically between [003, 050) +it := r.Root().Iterator() +it.SeekLowerBound([]byte("003")) +for key, _, ok := it.Next(); ok; key, _, ok = it.Next() { + if string(key) >= "050" { + break + } + fmt.Println(string(key)) +} +// Output: +// 005 +// 010 +``` + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go new file mode 100644 index 000000000..a63674775 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go @@ -0,0 +1,21 @@ +package iradix + +import "sort" + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go new file mode 100644 index 000000000..168bda76d --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go @@ -0,0 +1,676 @@ +package iradix + +import ( + "bytes" + "strings" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // defaultModifiedCache is the default size of the modified node + // cache used per transaction. This is used to cache the updates + // to the nodes near the root, while the leaves do not need to be + // cached. This is important for very large transactions to prevent + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. + defaultModifiedCache = 8192 +) + +// Tree implements an immutable radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over a standard +// hash map is prefix-based lookups and ordered iteration. The immutability +// means that it is safe to concurrently read from a Tree without any +// coordination. +type Tree struct { + root *Node + size int +} + +// New returns an empty Tree +func New() *Tree { + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn struct { + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree.
This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree) Txn() *Txn { + txn := &Txn{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// Clone makes an independent copy of the transaction. The new transaction +// does not track any nodes and has TrackMutate turned off. The cloned transaction will contain any uncommitted writes in the original transaction, but further mutations to either will be independent and result in different radix trees on Commit. A cloned transaction may be passed to another goroutine and mutated there independently; however, each transaction may only be mutated in a single thread. +func (t *Txn) Clone() *Txn { + // reset the writable node cache to avoid leaking future writes into the clone + t.writable = nil + + txn := &Txn{ + root: t.root, + snap: t.snap, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. + t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified; if the current node has already been +// modified during the course of the transaction, it is used in-place. Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. +func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { + // Ensure the writable set exists. + if t.writable == nil { + lru, err := simplelru.NewLRU(defaultModifiedCache, nil) + if err != nil { + panic(err) + } + t.writable = lru + } + + // If this node has already been modified, we can continue to use it + // during this transaction. We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf.
+ if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. 
+ n.prefix = concat(n.prefix, child.prefix) + n.leaf = child.leaf + if len(child.edges) != 0 { + n.edges = make([]edge, len(child.edges)) + copy(n.edges, child.edges) + } else { + n.edges = nil + } +} + +// insert does a recursive insertion +func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) { + // Handle key exhaustion + if len(search) == 0 { + var oldVal interface{} + didUpdate := false + if n.isLeaf() { + oldVal = n.leaf.val + didUpdate = true + } + + nc := t.writeNode(n, true) + nc.leaf = &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + return nc, oldVal, didUpdate + } + + // Look for the edge + idx, child := n.getEdge(search[0]) + + // No edge, create one + if child == nil { + e := edge{ + label: search[0], + node: &Node{ + mutateCh: make(chan struct{}), + leaf: &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + }, + prefix: search, + }, + } + nc := t.writeNode(n, false) + nc.addEdge(e) + return nc, nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, child.prefix) + if commonPrefix == len(child.prefix) { + search = search[commonPrefix:] + newChild, oldVal, didUpdate := t.insert(child, k, search, v) + if newChild != nil { + nc := t.writeNode(n, false) + nc.edges[idx].node = newChild + return nc, oldVal, didUpdate + } + return nil, oldVal, didUpdate + } + + // Split the node + nc := t.writeNode(n, false) + splitNode := &Node{ + mutateCh: make(chan struct{}), + prefix: search[:commonPrefix], + } + nc.replaceEdge(edge{ + label: search[0], + node: splitNode, + }) + + // Restore the existing child node + modChild := t.writeNode(child, false) + splitNode.addEdge(edge{ + label: modChild.prefix[commonPrefix], + node: modChild, + }) + modChild.prefix = modChild.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + mutateCh: make(chan struct{}), + key: k, + val: v, + } + + // If the new key is a subset, add it to this node + search = search[commonPrefix:] + if len(search) == 0 { + splitNode.leaf = leaf + return nc, nil, false + } + + // Create a new edge for the node + splitNode.addEdge(edge{ + label: search[0], + node: &Node{ + mutateCh: make(chan struct{}), + leaf: leaf, + prefix: search, + }, + }) + return nc, nil, false +} + +// delete does a recursive deletion +func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + return nil, nil + } + // Copy the pointer in case we are in a transaction that already + // modified this node since the node will be reused. Any changes + // made to the node will not affect returning the original leaf + // value. + oldLeaf := n.leaf + + // Remove the leaf node + nc := t.writeNode(n, true) + nc.leaf = nil + + // Check if this node should be merged + if n != t.root && len(nc.edges) == 1 { + t.mergeChild(nc) + } + return nc, oldLeaf + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + if child == nil || !bytes.HasPrefix(search, child.prefix) { + return nil, nil + } + + // Consume the search prefix + search = search[len(child.prefix):] + newChild, leaf := t.delete(n, child, search) + if newChild == nil { + return nil, nil + } + + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below.
+	// so be careful if you change any of the logic here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, leaf
+}
+
+// deletePrefix does a recursive deletion of the subtree under a prefix
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		nc := t.writeNode(n, true)
+		if n.isLeaf() {
+			nc.leaf = nil
+		}
+		nc.edges = nil
+		return nc, t.trackChannelsAndCount(n)
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
+	// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
+	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+		return nil, 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(search) {
+		search = []byte("")
+	} else {
+		search = search[len(child.prefix):]
+	}
+	newChild, numDeletions := t.deletePrefix(n, child, search)
+	if newChild == nil {
+		return nil, 0
+	}
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via t.mergeChild(nc) if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
+	newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if !didUpdate {
+		t.size++
+	}
+	return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
+func (t *Txn) Delete(k []byte) (interface{}, bool) {
+	newRoot, leaf := t.delete(nil, t.root, k)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if leaf != nil {
+		t.size--
+		return leaf.val, true
+	}
+	return nil, false
+}
+
+// DeletePrefix is used to delete an entire subtree that matches the prefix.
+// This will delete all nodes under that prefix.
+func (t *Txn) DeletePrefix(prefix []byte) bool {
+	newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
+	if newRoot != nil {
+		t.root = newRoot
+		t.size = t.size - numDeletions
+		return true
+	}
+	return false
+}
+
+// Root returns the current root of the radix tree within this
+// transaction. The root is not safe across insert and delete operations,
+// but can be used to read the current state during a transaction.
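+// For example, the returned node's read-only helpers such as Get, WalkPrefix
+// and Iterator can be used to inspect the in-progress state.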
+func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. + rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. 
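+// For example, t2, old, ok := t.Insert([]byte("foo"), 1) returns the updated
+// tree t2 while leaving t itself unchanged; ok reports whether a previous
+// value was replaced.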
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. +func (t *Tree) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 []byte) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// concat two byte slices, returning a third new copy +func concat(a, b []byte) []byte { + c := make([]byte, len(a)+len(b)) + copy(c, a) + copy(c[len(a):], b) + return c +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go new file mode 100644 index 000000000..f17d0a644 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go @@ -0,0 +1,205 @@ +package iradix + +import ( + "bytes" +) + +// Iterator is used to iterate over a set of nodes +// in pre-order +type Iterator struct { + node *Node + stack []edges +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + // Wipe the stack + i.stack = nil + n := i.node + watch = n.mutateCh + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + i.node = n + return + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + i.node = nil + return + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if bytes.HasPrefix(n.prefix, search) { + i.node = n + return + } else { + i.node = nil + return + } + } +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (i *Iterator) SeekPrefix(prefix []byte) { + i.SeekPrefixWatch(prefix) +} + +func (i *Iterator) recurseMin(n *Node) *Node { + // Traverse to the minimum child + if n.leaf != nil { + return n + } + nEdges := len(n.edges) + if nEdges > 1 { + // Add all the other edges to the stack (the min node will be added as + // we recurse) + i.stack = append(i.stack, n.edges[1:]) + } + if nEdges > 0 { + return i.recurseMin(n.edges[0].node) + } + // Shouldn't be possible + return nil +} + +// SeekLowerBound is used to seek the iterator to the smallest key that is +// greater or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. 
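+//
+// For example, if the tree contains the keys "bar" and "foo", then
+// SeekLowerBound([]byte("baz")) positions the iterator so that Next returns
+// "foo" first.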
+func (i *Iterator) SeekLowerBound(key []byte) {
+	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+	// go because we need only a subset of edges of many nodes in the path to the
+	// leaf with the lower bound. Note that the iterator will still recurse into
+	// children that we don't traverse on the way to the lower bound as it
+	// walks the stack.
+	i.stack = []edges{}
+	// i.node starts off in the common case as pointing to the root node of the
+	// tree. By the time we return we have either found a lower bound and set up
+	// the stack to traverse all larger keys, or we have not and the stack and
+	// node should both be nil to prevent the iterator from assuming it is just
+	// iterating the whole tree from the root node. Either way this needs to end
+	// up as nil so just set it here.
+	n := i.node
+	i.node = nil
+	search := key
+
+	found := func(n *Node) {
+		i.stack = append(i.stack, edges{edge{node: n}})
+	}
+
+	findMin := func(n *Node) {
+		n = i.recurseMin(n)
+		if n != nil {
+			found(n)
+			return
+		}
+	}
+
+	for {
+		// Compare current prefix with the search key's same-length prefix.
+		var prefixCmp int
+		if len(n.prefix) < len(search) {
+			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+		} else {
+			prefixCmp = bytes.Compare(n.prefix, search)
+		}
+
+		if prefixCmp > 0 {
+			// Prefix is larger, that means the lower bound is greater than the search
+			// and from now on we need to follow the minimum path to the smallest
+			// leaf under this subtree.
+			findMin(n)
+			return
+		}
+
+		if prefixCmp < 0 {
+			// Prefix is smaller than search prefix, that means there is no lower
+			// bound
+			i.node = nil
+			return
+		}
+
+		// Prefix is equal, we are still heading for an exact match. If this is a
+		// leaf and an exact match we're done.
+		if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
+			found(n)
+			return
+		}
+
+		// Consume the search prefix if the current node has one. Note that this is
+		// safe because if n.prefix is longer than the search slice prefixCmp would
+		// have been > 0 above and the method would have already returned.
+		search = search[len(n.prefix):]
+
+		if len(search) == 0 {
+			// We've exhausted the search key, but the current node is not an exact
+			// match or not a leaf. That means the leaf value, if it exists, and
+			// all child nodes must be strictly greater, so the smallest key in this
+			// subtree must be the lower bound.
+			findMin(n)
+			return
+		}
+
+		// Otherwise, take the lower bound next edge.
+		idx, lbNode := n.getLowerBoundEdge(search[0])
+		if lbNode == nil {
+			return
+		}
+
+		// Create stack edges for all the strictly higher edges in this node.
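+		// For example, at a node with edges labeled 'a', 'c' and 'e' and a
+		// search byte of 'c', lbNode is the 'c' edge and the 'e' edge is
+		// pushed so that Next visits it after the 'c' subtree.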
+ if idx+1 < len(n.edges) { + i.stack = append(i.stack, n.edges[idx+1:]) + } + + // Recurse + n = lbNode + } +} + +// Next returns the next node in order +func (i *Iterator) Next() ([]byte, interface{}, bool) { + // Initialize our stack if needed + if i.stack == nil && i.node != nil { + i.stack = []edges{ + { + edge{node: i.node}, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack + n := len(i.stack) + last := i.stack[n-1] + elem := last[0].node + + // Update the stack + if len(last) > 1 { + i.stack[n-1] = last[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier + if len(elem.edges) > 0 { + i.stack = append(i.stack, elem.edges) + } + + // Return the leaf values if any + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + } + return nil, nil, false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go new file mode 100644 index 000000000..359854808 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go @@ -0,0 +1,334 @@ +package iradix + +import ( + "bytes" + "sort" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(k []byte, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + mutateCh chan struct{} + key []byte + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *Node +} + +// Node is an immutable node in the radix tree +type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. 
+ // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) getLowerBoundEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + // we want lower bound behavior so return even if it's not an exact match + if idx < num { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
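+// For example, if the tree contains the keys "foo" and "foobar", a lookup of
+// "foobarbaz" returns the entry for "foobar".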
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+	var last *leafNode
+	search := k
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+	return &Iterator{node: n}
+}
+
+// ReverseIterator is used to return an iterator at
+// the given node to walk the tree backwards
+func (n *Node) ReverseIterator() *ReverseIterator {
+	return NewReverseIterator(n)
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+	iter := &rawIterator{node: n}
+	iter.Next()
+	return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+	recursiveWalk(n, fn)
+}
+
+// WalkBackwards is used to walk the tree in reverse order
+func (n *Node) WalkBackwards(fn WalkFn) {
+	reverseRecursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// reverseRecursiveWalk is used to do a reverse pre-order +// walk of a node recursively. Returns true if the walk +// should be aborted +func reverseRecursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children in reverse order + for i := len(n.edges) - 1; i >= 0; i-- { + e := n.edges[i] + if reverseRecursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 000000000..3c6a22525 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + { + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. + if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go new file mode 100644 index 000000000..554fa7129 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go @@ -0,0 +1,239 @@ +package iradix + +import ( + "bytes" +) + +// ReverseIterator is used to iterate over a set of nodes +// in reverse in-order +type ReverseIterator struct { + i *Iterator + + // expandedParents stores the set of parent nodes whose relevant children have + // already been pushed into the stack. 
This can happen during seek or during + // iteration. + // + // Unlike forward iteration we need to recurse into children before we can + // output the value stored in an internal leaf since all children are greater. + // We use this to track whether we have already ensured all the children are + // in the stack. + expandedParents map[*Node]struct{} +} + +// NewReverseIterator returns a new ReverseIterator at a node +func NewReverseIterator(n *Node) *ReverseIterator { + return &ReverseIterator{ + i: &Iterator{node: n}, + } +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + return ri.i.SeekPrefixWatch(prefix) +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (ri *ReverseIterator) SeekPrefix(prefix []byte) { + ri.i.SeekPrefixWatch(prefix) +} + +// SeekReverseLowerBound is used to seek the iterator to the largest key that is +// lower or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. +func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) { + // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we + // go because we need only a subset of edges of many nodes in the path to the + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. + ri.i.stack = []edges{} + // ri.i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. + n := ri.i.node + ri.i.node = nil + search := key + + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node]struct{}) + } + + found := func(n *Node) { + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + // We need to mark this node as expanded in advance too otherwise the + // iterator will attempt to walk all of its children even though they are + // greater than the lower bound we have found. We've expanded it in the + // sense that all of its children that we want to walk are already in the + // stack (i.e. none of them). + ri.expandedParents[n] = struct{}{} + } + + for { + // Compare current prefix with the search key's same-length prefix. + var prefixCmp int + if len(n.prefix) < len(search) { + prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) + } else { + prefixCmp = bytes.Compare(n.prefix, search) + } + + if prefixCmp < 0 { + // Prefix is smaller than search prefix, that means there is no exact + // match for the search key. But we are looking in reverse, so the reverse + // lower bound will be the largest leaf under this subtree, since it is + // the value that would come right before the current search key if it + // were in the tree. So we need to follow the maximum path in this subtree + // to find it. 
Note that this is exactly what the iterator will already do
+			// if it finds a node in the stack that has _not_ been marked as expanded
+			// so in this one case we don't call `found` and instead let the iterator
+			// do the expansion and recursion through all the children.
+			ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+			return
+		}
+
+		if prefixCmp > 0 {
+			// Prefix is larger than search prefix, or there is no prefix but we've
+			// also exhausted the search key. Either way, that means there is no
+			// reverse lower bound since nothing comes before our current search
+			// prefix.
+			return
+		}
+
+		// If this is a leaf, something needs to happen! Note that if it's a leaf
+		// and prefixCmp was zero (which it must be to get here) then the leaf value
+		// is either an exact match for the search, or it's lower. It can't be
+		// greater.
+		if n.isLeaf() {
+
+			// Firstly, if it's an exact match, we're done!
+			if bytes.Equal(n.leaf.key, key) {
+				found(n)
+				return
+			}
+
+			// It's not so this node's leaf value must be lower and could still be a
+			// valid contender for reverse lower bound.
+
+			// If it has no children then we are also done.
+			if len(n.edges) == 0 {
+				// This leaf is the lower bound.
+				found(n)
+				return
+			}
+
+			// Finally, this leaf is internal (has children) so we'll keep searching,
+			// but we need to add it to the iterator's stack since it has a leaf value
+			// that needs to be iterated over. It needs to be added to the stack
+			// before its children below as it comes first.
+			ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+			// We also need to mark it as expanded since we'll be adding any of its
+			// relevant children below and so don't want the iterator to re-add them
+			// on its way back up the stack.
+			ri.expandedParents[n] = struct{}{}
+		}
+
+		// Consume the search prefix. Note that this is safe because if n.prefix is
+		// longer than the search slice prefixCmp would have been > 0 above and the
+		// method would have already returned.
+		search = search[len(n.prefix):]
+
+		if len(search) == 0 {
+			// We've exhausted the search key but we are not at a leaf. That means all
+			// children are greater than the search key so a reverse lower bound
+			// doesn't exist in this subtree. Note that there might still be one in
+			// the whole radix tree by following a different path somewhere further
+			// up. If that's the case then the iterator's stack will contain all the
+			// smaller nodes already and Previous will walk through them correctly.
+			return
+		}
+
+		// Otherwise, take the lower bound next edge.
+		idx, lbNode := n.getLowerBoundEdge(search[0])
+
+		// From here, we need to update the stack with all values lower than
+		// the lower bound edge. Since getLowerBoundEdge() returns -1 when the
+		// search prefix is larger than all edges, we need to place idx at the
+		// last edge index so they can all be placed in the stack, since they
+		// come before our search prefix.
+		if idx == -1 {
+			idx = len(n.edges)
+		}
+
+		// Create stack edges for all the strictly lower edges in this node.
+		if len(n.edges[:idx]) > 0 {
+			ri.i.stack = append(ri.i.stack, n.edges[:idx])
+		}
+
+		// Exit if there's no lower bound edge. The stack will have the previous
+		// nodes already.
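+		// For example, with edges labeled 'a', 'c' and 'e' and a search byte
+		// of 'd', the 'a' and 'c' edges are pushed and lbNode is the 'e'
+		// edge, which is examined next (and rejected there, since 'e' sorts
+		// after 'd').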
+ if lbNode == nil { + return + } + + // Recurse + n = lbNode + } +} + +// Previous returns the previous node in reverse order +func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) { + // Initialize our stack if needed + if ri.i.stack == nil && ri.i.node != nil { + ri.i.stack = []edges{ + { + edge{node: ri.i.node}, + }, + } + } + + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node]struct{}) + } + + for len(ri.i.stack) > 0 { + // Inspect the last element of the stack + n := len(ri.i.stack) + last := ri.i.stack[n-1] + m := len(last) + elem := last[m-1].node + + _, alreadyExpanded := ri.expandedParents[elem] + + // If this is an internal node and we've not seen it already, we need to + // leave it in the stack so we can return its possible leaf value _after_ + // we've recursed through all its children. + if len(elem.edges) > 0 && !alreadyExpanded { + // record that we've seen this node! + ri.expandedParents[elem] = struct{}{} + // push child edges onto stack and skip the rest of the loop to recurse + // into the largest one. + ri.i.stack = append(ri.i.stack, elem.edges) + continue + } + + // Remove the node from the stack + if m > 1 { + ri.i.stack[n-1] = last[:m-1] + } else { + ri.i.stack = ri.i.stack[:n-1] + } + // We don't need this state any more as it's no longer in the stack so we + // won't visit it again + if alreadyExpanded { + delete(ri.expandedParents, elem) + } + + // If this is a leaf, return it + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + + // it's not a leaf so keep walking the stack to find the previous leaf + } + return nil, nil, false +} diff --git a/vendor/github.com/hashicorp/go-memdb/.gitignore b/vendor/github.com/hashicorp/go-memdb/.gitignore new file mode 100644 index 000000000..11b90db8d --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.idea diff --git a/vendor/github.com/hashicorp/go-memdb/LICENSE b/vendor/github.com/hashicorp/go-memdb/LICENSE new file mode 100644 index 000000000..e87a115e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+  This Source Code Form is "Incompatible
+  With Secondary Licenses", as defined by
+  the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-memdb/README.md b/vendor/github.com/hashicorp/go-memdb/README.md
new file mode 100644
index 000000000..080b7447b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/README.md
@@ -0,0 +1,146 @@
+# go-memdb [![CircleCI](https://circleci.com/gh/hashicorp/go-memdb/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-memdb/tree/master)
+
+Provides the `memdb` package that implements a simple in-memory database
+built on immutable radix trees. The database provides Atomicity, Consistency
+and Isolation from ACID. Because it is in-memory, it does not provide durability.
+The database is instantiated with a schema that specifies the tables and indices
+that exist and allows transactions to be executed.
+
+The database provides the following:
+
+* Multi-Version Concurrency Control (MVCC) - By leveraging immutable radix trees
+  the database is able to support any number of concurrent readers without locking,
+  and allows a writer to make progress.
+
+* Transaction Support - The database allows for rich transactions, in which multiple
+  objects are inserted, updated or deleted. The transactions can span multiple tables,
+  and are applied atomically. The database provides atomicity and isolation in ACID
+  terminology, such that until commit the updates are not visible.
+
+* Rich Indexing - Tables can support any number of indexes, which can be simple like
+  a single field index, or more advanced compound field indexes. Certain types like
+  UUID can be efficiently compressed from strings into byte indexes for reduced
+  storage requirements.
+
+* Watches - Callers can populate a watch set as part of a query, which can be used to
+  detect when a modification has been made to the database which affects the query
+  results. This lets callers easily watch for changes in the database in a very general
+  way.
+
+For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://pkg.go.dev/github.com/hashicorp/go-memdb).
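+
+Watching for changes
+====================
+
+As a minimal sketch of the watch mechanism described above (it assumes the
+`db` value and schema from the example in the next section, and that `time`
+is imported), a caller can block until a query's results may have changed:
+
+```go
+// Open a read transaction and run the query whose results we want to watch.
+txn := db.Txn(false)
+defer txn.Abort()
+
+it, err := txn.Get("person", "id")
+if err != nil {
+	panic(err)
+}
+
+// WatchCh returns a channel that is closed when a write that may affect
+// this query commits.
+select {
+case <-it.WatchCh():
+	// Re-run the query to observe the new results.
+case <-time.After(10 * time.Second):
+	// Timed out waiting for a change.
+}
+```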
+
+Example
+=======
+
+Below is a [simple example](https://play.golang.org/p/gCGE9FA4og1) of usage:
+
+```go
+// Create a sample struct
+type Person struct {
+	Email string
+	Name  string
+	Age   int
+}
+
+// Create the DB schema
+schema := &memdb.DBSchema{
+	Tables: map[string]*memdb.TableSchema{
+		"person": &memdb.TableSchema{
+			Name: "person",
+			Indexes: map[string]*memdb.IndexSchema{
+				"id": &memdb.IndexSchema{
+					Name:    "id",
+					Unique:  true,
+					Indexer: &memdb.StringFieldIndex{Field: "Email"},
+				},
+				"age": &memdb.IndexSchema{
+					Name:    "age",
+					Unique:  false,
+					Indexer: &memdb.IntFieldIndex{Field: "Age"},
+				},
+			},
+		},
+	},
+}
+
+// Create a new database
+db, err := memdb.NewMemDB(schema)
+if err != nil {
+	panic(err)
+}
+
+// Create a write transaction
+txn := db.Txn(true)
+
+// Insert some people
+people := []*Person{
+	&Person{"joe@aol.com", "Joe", 30},
+	&Person{"lucy@aol.com", "Lucy", 35},
+	&Person{"tariq@aol.com", "Tariq", 21},
+	&Person{"dorothy@aol.com", "Dorothy", 53},
+}
+for _, p := range people {
+	if err := txn.Insert("person", p); err != nil {
+		panic(err)
+	}
+}
+
+// Commit the transaction
+txn.Commit()
+
+// Create read-only transaction
+txn = db.Txn(false)
+defer txn.Abort()
+
+// Lookup by email
+raw, err := txn.First("person", "id", "joe@aol.com")
+if err != nil {
+	panic(err)
+}
+
+// Say hi!
+fmt.Printf("Hello %s!\n", raw.(*Person).Name)
+
+// List all the people
+it, err := txn.Get("person", "id")
+if err != nil {
+	panic(err)
+}
+
+fmt.Println("All the people:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+	p := obj.(*Person)
+	fmt.Printf("  %s\n", p.Name)
+}
+
+// Range scan over people with ages between 25 and 35 inclusive
+it, err = txn.LowerBound("person", "age", 25)
+if err != nil {
+	panic(err)
+}
+
+fmt.Println("People aged 25 - 35:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+	p := obj.(*Person)
+	if p.Age > 35 {
+		break
+	}
+	fmt.Printf("  %s is aged %d\n", p.Name, p.Age)
+}
+// Output:
+// Hello Joe!
+// All the people:
+//   Dorothy
+//   Joe
+//   Lucy
+//   Tariq
+// People aged 25 - 35:
+//   Joe is aged 30
+//   Lucy is aged 35
+```
+
diff --git a/vendor/github.com/hashicorp/go-memdb/changes.go b/vendor/github.com/hashicorp/go-memdb/changes.go
new file mode 100644
index 000000000..35089f5ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/changes.go
@@ -0,0 +1,34 @@
+package memdb
+
+// Changes describes a set of mutations to memDB tables performed during a
+// transaction.
+type Changes []Change
+
+// Change describes a mutation to an object in a table.
+type Change struct {
+	Table  string
+	Before interface{}
+	After  interface{}
+
+	// primaryKey stores the raw key value from the primary index so that we can
+	// de-duplicate multiple updates of the same object in the same transaction
+	// but we don't expose this implementation detail to the consumer.
+	primaryKey []byte
+}
+
+// Created returns true if the mutation describes a new object being inserted.
+func (m *Change) Created() bool {
+	return m.Before == nil && m.After != nil
+}
+
+// Updated returns true if the mutation describes an existing object being
+// updated.
+func (m *Change) Updated() bool {
+	return m.Before != nil && m.After != nil
+}
+
+// Deleted returns true if the mutation describes an existing object being
+// deleted.
+func (m *Change) Deleted() bool {
+	return m.Before != nil && m.After == nil
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/filter.go b/vendor/github.com/hashicorp/go-memdb/filter.go
new file mode 100644
index 000000000..0071ab311
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/filter.go
@@ -0,0 +1,38 @@
+package memdb
+
+// FilterFunc is a function that takes the results of an iterator and returns
+// whether the result should be filtered out.
+type FilterFunc func(interface{}) bool
+
+// FilterIterator is used to wrap a ResultIterator and apply a filter over it.
+type FilterIterator struct {
+	// filter is the filter function applied over the base iterator.
+	filter FilterFunc
+
+	// iter is the iterator that is being wrapped.
+	iter ResultIterator
+}
+
+// NewFilterIterator wraps a ResultIterator. The filter function is applied
+// to each value returned by a call to iter.Next.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned FilterIterator.
+func NewFilterIterator(iter ResultIterator, filter FilterFunc) *FilterIterator {
+	return &FilterIterator{
+		filter: filter,
+		iter:   iter,
+	}
+}
+
+// WatchCh returns the watch channel of the wrapped iterator.
+func (f *FilterIterator) WatchCh() <-chan struct{} { return f.iter.WatchCh() }
+
+// Next returns the next non-filtered result from the wrapped iterator.
+func (f *FilterIterator) Next() interface{} {
+	for {
+		if value := f.iter.Next(); value == nil || !f.filter(value) {
+			return value
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/index.go b/vendor/github.com/hashicorp/go-memdb/index.go
new file mode 100644
index 000000000..172a0e86b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/index.go
@@ -0,0 +1,931 @@
+package memdb
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// Indexer is an interface used for defining indexes. Indexes are used
+// for efficient lookup of objects in a MemDB table. An Indexer must also
+// implement one of SingleIndexer or MultiIndexer.
+//
+// Indexers are primarily responsible for returning the lookup key as
+// a byte slice. The byte slice is the key data in the underlying data storage.
+type Indexer interface {
+	// FromArgs is called to build the exact index key from a list of arguments.
+	FromArgs(args ...interface{}) ([]byte, error)
+}
+
+// SingleIndexer is an interface used for defining indexes that generate a
+// single value per object
+type SingleIndexer interface {
+	// FromObject extracts the index value from an object. The return values
+	// are whether the index value was found, the index value, and any error
+	// while extracting the index value, respectively.
+	FromObject(raw interface{}) (bool, []byte, error)
+}
+
+// MultiIndexer is an interface used for defining indexes that generate
+// multiple values per object. Each value is stored as a separate index
+// pointing to the same object.
+//
+// For example, an index that extracts the first and last name of a person
+// and allows lookup based on either would be a MultiIndexer. The FromObject
+// of this example would split the first and last name and return both as
+// values.
+type MultiIndexer interface {
+	// FromObject extracts index values from an object. The return values
+	// are the same as a SingleIndexer except there can be multiple index
+	// values.
+ FromObject(raw interface{}) (bool, [][]byte, error) +} + +// PrefixIndexer is an optional interface on top of an Indexer that allows +// indexes to support prefix-based iteration. +type PrefixIndexer interface { + // PrefixFromArgs is the same as FromArgs for an Indexer except that + // the index value returned should return all prefix-matched values. + PrefixFromArgs(args ...interface{}) ([]byte, error) +} + +// StringFieldIndex is used to extract a field from an object +// using reflection and builds an index on that field. +type StringFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + isPtr := fv.Kind() == reflect.Ptr + fv = reflect.Indirect(fv) + if !isPtr && !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid %v ", s.Field, obj, isPtr) + } + + if isPtr && !fv.IsValid() { + val := "" + return false, []byte(val), nil + } + + val := fv.String() + if val == "" { + return false, nil, nil + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + return true, []byte(val), nil +} + +func (s *StringFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringSliceFieldIndex builds an index from a field on an object that is a +// string slice ([]string). Each value within the string slice can be used for +// lookup. 
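Because `StringFieldIndex` implements `PrefixIndexer` (by stripping the null terminator, as above), an index built on it supports prefix queries by appending `_prefix` to the index name. A sketch against the `Person` table from the example above:

```go
txn := db.Txn(false)
defer txn.Abort()

// Matches every person whose Email starts with "joe".
it, err := txn.Get("person", "id_prefix", "joe")
if err != nil {
	panic(err)
}
for obj := it.Next(); obj != nil; obj = it.Next() {
	fmt.Println(obj.(*Person).Email)
}
```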
+type StringSliceFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringSliceFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != reflect.Slice || fv.Type().Elem().Kind() != reflect.String { + return false, nil, fmt.Errorf("field '%s' is not a string slice", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for i := 0; i < fv.Len(); i++ { + val := fv.Index(i).String() + if val == "" { + continue + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + vals = append(vals, []byte(val)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +func (s *StringSliceFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringSliceFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringMapFieldIndex is used to extract a field of type map[string]string +// from an object using reflection and builds an index on that field. +// +// Note that although FromArgs in theory supports using either one or +// two arguments, there is a bug: FromObject only creates an index +// using key/value, and does not also create an index using key. This +// means a lookup using one argument will never actually work. +// +// It is currently left as-is to prevent backwards compatibility +// issues. +// +// TODO: Fix this in the next major bump. +type StringMapFieldIndex struct { + Field string + Lowercase bool +} + +var MapType = reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf("")).Kind() + +func (s *StringMapFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != MapType { + return false, nil, fmt.Errorf("field '%s' is not a map[string]string", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for _, key := range fv.MapKeys() { + k := key.String() + if k == "" { + continue + } + val := fv.MapIndex(key).String() + + if s.Lowercase { + k = strings.ToLower(k) + val = strings.ToLower(val) + } + + // Add the null character as a terminator + k += "\x00" + val + "\x00" + + vals = append(vals, []byte(k)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +// WARNING: Because of a bug in FromObject, this function will never return +// a value when using the single-argument version. 
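A sketch of a schema using `StringSliceFieldIndex`; the `Doc` type and `doc` table are hypothetical. Each element of `Tags` becomes its own non-unique index entry, so a lookup by any single tag finds the document:

```go
type Doc struct {
	ID   string
	Tags []string
}

schema := &memdb.DBSchema{
	Tables: map[string]*memdb.TableSchema{
		"doc": {
			Name: "doc",
			Indexes: map[string]*memdb.IndexSchema{
				"id": {
					Name:    "id",
					Unique:  true,
					Indexer: &memdb.StringFieldIndex{Field: "ID"},
				},
				"tag": {
					Name:    "tag",
					Unique:  false,
					Indexer: &memdb.StringSliceFieldIndex{Field: "Tags"},
				},
			},
		},
	},
}

// After inserting documents, any element of Tags is a valid lookup key:
//   it, err := txn.Get("doc", "tag", "kubernetes")
```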
+func (s *StringMapFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) > 2 || len(args) == 0 { + return nil, fmt.Errorf("must provide one or two arguments") + } + key, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + key = strings.ToLower(key) + } + // Add the null character as a terminator + key += "\x00" + + if len(args) == 2 { + val, ok := args[1].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[1]) + } + if s.Lowercase { + val = strings.ToLower(val) + } + // Add the null character as a terminator + key += val + "\x00" + } + + return []byte(key), nil +} + +// IntFieldIndex is used to extract an int field from an object using +// reflection and builds an index on that field. +type IntFieldIndex struct { + Field string +} + +func (i *IntFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(i.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj) + } + + // Check the type + k := fv.Kind() + size, ok := IsIntType(k) + if !ok { + return false, nil, fmt.Errorf("field %q is of type %v; want an int", i.Field, k) + } + + // Get the value and encode it + val := fv.Int() + buf := encodeInt(val, size) + + return true, buf, nil +} + +func (i *IntFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + v := reflect.ValueOf(args[0]) + if !v.IsValid() { + return nil, fmt.Errorf("%#v is invalid", args[0]) + } + + k := v.Kind() + size, ok := IsIntType(k) + if !ok { + return nil, fmt.Errorf("arg is of type %v; want a int", k) + } + + val := v.Int() + buf := encodeInt(val, size) + + return buf, nil +} + +func encodeInt(val int64, size int) []byte { + buf := make([]byte, size) + + // This bit flips the sign bit on any sized signed twos-complement integer, + // which when truncated to a uint of the same size will bias the value such + // that the maximum negative int becomes 0, and the maximum positive int + // becomes the maximum positive uint. + scaled := val ^ int64(-1<<(size*8-1)) + + switch size { + case 1: + buf[0] = uint8(scaled) + case 2: + binary.BigEndian.PutUint16(buf, uint16(scaled)) + case 4: + binary.BigEndian.PutUint32(buf, uint32(scaled)) + case 8: + binary.BigEndian.PutUint64(buf, uint64(scaled)) + default: + panic(fmt.Sprintf("unsupported int size parameter: %d", size)) + } + + return buf +} + +// IsIntType returns whether the passed type is a type of int and the number +// of bytes needed to encode the type. +func IsIntType(k reflect.Kind) (size int, okay bool) { + switch k { + case reflect.Int: + return strconv.IntSize / 8, true + case reflect.Int8: + return 1, true + case reflect.Int16: + return 2, true + case reflect.Int32: + return 4, true + case reflect.Int64: + return 8, true + default: + return 0, false + } +} + +// UintFieldIndex is used to extract a uint field from an object using +// reflection and builds an index on that field. 
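The sign-bit flip in `encodeInt` above is what makes `bytes.Compare` order match numeric order, which `LowerBound` range scans rely on. A standalone sketch of the same trick for 8-byte values (mirroring the unexported helper; `encode64` is not part of the library):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encode64 mirrors encodeInt for size 8: XOR with the minimum int64
// flips the sign bit, so negative values sort below positive ones
// under a big-endian byte comparison.
func encode64(v int64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(v^(-1<<63)))
	return buf
}

func main() {
	fmt.Println(bytes.Compare(encode64(-5), encode64(3)) < 0) // true
	fmt.Println(bytes.Compare(encode64(3), encode64(42)) < 0) // true
}
```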
+type UintFieldIndex struct { + Field string +} + +func (u *UintFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(u.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj) + } + + // Check the type + k := fv.Kind() + size, ok := IsUintType(k) + if !ok { + return false, nil, fmt.Errorf("field %q is of type %v; want a uint", u.Field, k) + } + + // Get the value and encode it + val := fv.Uint() + buf := encodeUInt(val, size) + + return true, buf, nil +} + +func (u *UintFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + v := reflect.ValueOf(args[0]) + if !v.IsValid() { + return nil, fmt.Errorf("%#v is invalid", args[0]) + } + + k := v.Kind() + size, ok := IsUintType(k) + if !ok { + return nil, fmt.Errorf("arg is of type %v; want a uint", k) + } + + val := v.Uint() + buf := encodeUInt(val, size) + + return buf, nil +} + +func encodeUInt(val uint64, size int) []byte { + buf := make([]byte, size) + + switch size { + case 1: + buf[0] = uint8(val) + case 2: + binary.BigEndian.PutUint16(buf, uint16(val)) + case 4: + binary.BigEndian.PutUint32(buf, uint32(val)) + case 8: + binary.BigEndian.PutUint64(buf, val) + default: + panic(fmt.Sprintf("unsupported uint size parameter: %d", size)) + } + + return buf +} + +// IsUintType returns whether the passed type is a type of uint and the number +// of bytes needed to encode the type. +func IsUintType(k reflect.Kind) (size int, okay bool) { + switch k { + case reflect.Uint: + return strconv.IntSize / 8, true + case reflect.Uint8: + return 1, true + case reflect.Uint16: + return 2, true + case reflect.Uint32: + return 4, true + case reflect.Uint64: + return 8, true + default: + return 0, false + } +} + +// BoolFieldIndex is used to extract an boolean field from an object using +// reflection and builds an index on that field. +type BoolFieldIndex struct { + Field string +} + +func (i *BoolFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(i.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj) + } + + // Check the type + k := fv.Kind() + if k != reflect.Bool { + return false, nil, fmt.Errorf("field %q is of type %v; want a bool", i.Field, k) + } + + // Get the value and encode it + buf := make([]byte, 1) + if fv.Bool() { + buf[0] = 1 + } + + return true, buf, nil +} + +func (i *BoolFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// UUIDFieldIndex is used to extract a field from an object +// using reflection and builds an index on that field by treating +// it as a UUID. This is an optimization to using a StringFieldIndex +// as the UUID can be more compactly represented in byte form. 
+type UUIDFieldIndex struct { + Field string +} + +func (u *UUIDFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(u.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj) + } + + val := fv.String() + if val == "" { + return false, nil, nil + } + + buf, err := u.parseString(val, true) + return true, buf, err +} + +func (u *UUIDFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + switch arg := args[0].(type) { + case string: + return u.parseString(arg, true) + case []byte: + if len(arg) != 16 { + return nil, fmt.Errorf("byte slice must be 16 characters") + } + return arg, nil + default: + return nil, + fmt.Errorf("argument must be a string or byte slice: %#v", args[0]) + } +} + +func (u *UUIDFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + switch arg := args[0].(type) { + case string: + return u.parseString(arg, false) + case []byte: + return arg, nil + default: + return nil, + fmt.Errorf("argument must be a string or byte slice: %#v", args[0]) + } +} + +// parseString parses a UUID from the string. If enforceLength is false, it will +// parse a partial UUID. An error is returned if the input, stripped of hyphens, +// is not even length. +func (u *UUIDFieldIndex) parseString(s string, enforceLength bool) ([]byte, error) { + // Verify the length + l := len(s) + if enforceLength && l != 36 { + return nil, fmt.Errorf("UUID must be 36 characters") + } else if l > 36 { + return nil, fmt.Errorf("Invalid UUID length. UUID have 36 characters; got %d", l) + } + + hyphens := strings.Count(s, "-") + if hyphens > 4 { + return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens) + } + + // The sanitized length is the length of the original string without the "-". + sanitized := strings.Replace(s, "-", "", -1) + sanitizedLength := len(sanitized) + if sanitizedLength%2 != 0 { + return nil, fmt.Errorf("Input (without hyphens) must be even length") + } + + dec, err := hex.DecodeString(sanitized) + if err != nil { + return nil, fmt.Errorf("Invalid UUID: %v", err) + } + + return dec, nil +} + +// FieldSetIndex is used to extract a field from an object using reflection and +// builds an index on whether the field is set by comparing it against its +// type's nil value. +type FieldSetIndex struct { + Field string +} + +func (f *FieldSetIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(f.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", f.Field, obj) + } + + if fv.Interface() == reflect.Zero(fv.Type()).Interface() { + return true, []byte{0}, nil + } + + return true, []byte{1}, nil +} + +func (f *FieldSetIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// ConditionalIndex builds an index based on a condition specified by a passed +// user function. This function may examine the passed object and return a +// boolean to encapsulate an arbitrarily complex conditional. 
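A sketch of the `ConditionalIndex` described above, as an entry in the `Person` table's index map (the `adult` index name and age threshold are invented for illustration); lookups then take a single boolean argument, per `fromBoolArgs` below:

```go
"adult": {
	Name:   "adult",
	Unique: false,
	Indexer: &memdb.ConditionalIndex{
		Conditional: func(obj interface{}) (bool, error) {
			p, ok := obj.(*Person)
			if !ok {
				return false, fmt.Errorf("unexpected type %T", obj)
			}
			return p.Age >= 18, nil
		},
	},
},
```

A query such as `txn.Get("person", "adult", true)` then iterates everyone the conditional marked true.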
+type ConditionalIndex struct { + Conditional ConditionalIndexFunc +} + +// ConditionalIndexFunc is the required function interface for a +// ConditionalIndex. +type ConditionalIndexFunc func(obj interface{}) (bool, error) + +func (c *ConditionalIndex) FromObject(obj interface{}) (bool, []byte, error) { + // Call the user's function + res, err := c.Conditional(obj) + if err != nil { + return false, nil, fmt.Errorf("ConditionalIndexFunc(%#v) failed: %v", obj, err) + } + + if res { + return true, []byte{1}, nil + } + + return true, []byte{0}, nil +} + +func (c *ConditionalIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// fromBoolArgs is a helper that expects only a single boolean argument and +// returns a single length byte array containing either a one or zero depending +// on whether the passed input is true or false respectively. +func fromBoolArgs(args []interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + if val, ok := args[0].(bool); !ok { + return nil, fmt.Errorf("argument must be a boolean type: %#v", args[0]) + } else if val { + return []byte{1}, nil + } + + return []byte{0}, nil +} + +// CompoundIndex is used to build an index using multiple sub-indexes +// Prefix based iteration is supported as long as the appropriate prefix +// of indexers support it. All sub-indexers are only assumed to expect +// a single argument. +type CompoundIndex struct { + Indexes []Indexer + + // AllowMissing results in an index based on only the indexers + // that return data. If true, you may end up with 2/3 columns + // indexed which might be useful for an index scan. Otherwise, + // the CompoundIndex requires all indexers to be satisfied. + AllowMissing bool +} + +func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) { + var out []byte + for i, idxRaw := range c.Indexes { + idx, ok := idxRaw.(SingleIndexer) + if !ok { + return false, nil, fmt.Errorf("sub-index %d error: %s", i, "sub-index must be a SingleIndexer") + } + ok, val, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break + } else { + return false, nil, nil + } + } + out = append(out, val...) + } + return true, out, nil +} + +func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != len(c.Indexes) { + return nil, fmt.Errorf("non-equivalent argument count and index fields") + } + var out []byte + for i, arg := range args { + val, err := c.Indexes[i].FromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } + return out, nil +} + +func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + if len(args) > len(c.Indexes) { + return nil, fmt.Errorf("more arguments than index fields") + } + var out []byte + for i, arg := range args { + if i+1 < len(args) { + val, err := c.Indexes[i].FromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } else { + prefixIndexer, ok := c.Indexes[i].(PrefixIndexer) + if !ok { + return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i) + } + val, err := prefixIndexer.PrefixFromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) 
+ } + } + return out, nil +} + +// CompoundMultiIndex is used to build an index using multiple +// sub-indexes. +// +// Unlike CompoundIndex, CompoundMultiIndex can have both +// SingleIndexer and MultiIndexer sub-indexers. However, each +// MultiIndexer adds considerable overhead/complexity in terms of +// the number of indexes created under-the-hood. It is not suggested +// to use more than one or two, if possible. +// +// Another change from CompoundIndexer is that if AllowMissing is +// set, not only is it valid to have empty index fields, but it will +// still create index values up to the first empty index. This means +// that if you have a value with an empty field, rather than using a +// prefix for lookup, you can simply pass in less arguments. As an +// example, if {Foo, Bar} is indexed but Bar is missing for a value +// and AllowMissing is set, an index will still be created for {Foo} +// and it is valid to do a lookup passing in only Foo as an argument. +// Note that the ordering isn't guaranteed -- it's last-insert wins, +// but this is true if you have two objects that have the same +// indexes not using AllowMissing anyways. +// +// Because StringMapFieldIndexers can take a varying number of args, +// it is currently a requirement that whenever it is used, two +// arguments must _always_ be provided for it. In theory we only +// need one, except a bug in that indexer means the single-argument +// version will never work. You can leave the second argument nil, +// but it will never produce a value. We support this for whenever +// that bug is fixed, likely in a next major version bump. +// +// Prefix-based indexing is not currently supported. +type CompoundMultiIndex struct { + Indexes []Indexer + + // AllowMissing results in an index based on only the indexers + // that return data. If true, you may end up with 2/3 columns + // indexed which might be useful for an index scan. Otherwise, + // CompoundMultiIndex requires all indexers to be satisfied. + AllowMissing bool +} + +func (c *CompoundMultiIndex) FromObject(raw interface{}) (bool, [][]byte, error) { + // At each entry, builder is storing the results from the next index + builder := make([][][]byte, 0, len(c.Indexes)) + +forloop: + // This loop goes through each indexer and adds the value(s) provided to the next + // entry in the slice. We can then later walk it like a tree to construct the indices. + for i, idxRaw := range c.Indexes { + switch idx := idxRaw.(type) { + case SingleIndexer: + ok, val, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("single sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break forloop + } else { + return false, nil, nil + } + } + builder = append(builder, [][]byte{val}) + + case MultiIndexer: + ok, vals, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("multi sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break forloop + } else { + return false, nil, nil + } + } + + // Add each of the new values to each of the old values + builder = append(builder, vals) + + default: + return false, nil, fmt.Errorf("sub-index %d does not satisfy either SingleIndexer or MultiIndexer", i) + } + } + + // Start with something higher to avoid resizing if possible + out := make([][]byte, 0, len(c.Indexes)^3) + + // We are walking through the builder slice essentially in a depth-first fashion, + // building the prefix and leaves as we go. 
If AllowMissing is false, we only insert + // these full paths to leaves. Otherwise, we also insert each prefix along the way. + // This allows for lookup in FromArgs when AllowMissing is true that does not contain + // the full set of arguments. e.g. for {Foo, Bar} where an object has only the Foo + // field specified as "abc", it is valid to call FromArgs with just "abc". + var walkVals func([]byte, int) + walkVals = func(currPrefix []byte, depth int) { + if depth >= len(builder) { + return + } + + if depth == len(builder)-1 { + // These are the "leaves", so append directly + for _, v := range builder[depth] { + outcome := make([]byte, len(currPrefix)) + copy(outcome, currPrefix) + out = append(out, append(outcome, v...)) + } + return + } + for _, v := range builder[depth] { + nextPrefix := append(currPrefix, v...) + if c.AllowMissing { + out = append(out, nextPrefix) + } + walkVals(nextPrefix, depth+1) + } + } + + walkVals(nil, 0) + + return true, out, nil +} + +func (c *CompoundMultiIndex) FromArgs(args ...interface{}) ([]byte, error) { + var stringMapCount int + var argCount int + for _, index := range c.Indexes { + if argCount >= len(args) { + break + } + if _, ok := index.(*StringMapFieldIndex); ok { + // We require pairs for StringMapFieldIndex, but only got one + if argCount+1 >= len(args) { + return nil, errors.New("invalid number of arguments") + } + stringMapCount++ + argCount += 2 + } else { + argCount++ + } + } + argCount = 0 + + switch c.AllowMissing { + case true: + if len(args) > len(c.Indexes)+stringMapCount { + return nil, errors.New("too many arguments") + } + + default: + if len(args) != len(c.Indexes)+stringMapCount { + return nil, errors.New("number of arguments does not equal number of indexers") + } + } + + var out []byte + var val []byte + var err error + for i, idx := range c.Indexes { + if argCount >= len(args) { + // We're done; should only hit this if AllowMissing + break + } + if _, ok := idx.(*StringMapFieldIndex); ok { + if args[argCount+1] == nil { + val, err = idx.FromArgs(args[argCount]) + } else { + val, err = idx.FromArgs(args[argCount : argCount+2]...) + } + argCount += 2 + } else { + val, err = idx.FromArgs(args[argCount]) + argCount++ + } + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-memdb/memdb.go b/vendor/github.com/hashicorp/go-memdb/memdb.go new file mode 100644 index 000000000..0508d0aae --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/memdb.go @@ -0,0 +1,116 @@ +// Package memdb provides an in-memory database that supports transactions +// and MVCC. +package memdb + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/hashicorp/go-immutable-radix" +) + +// MemDB is an in-memory database providing Atomicity, Consistency, and +// Isolation from ACID. MemDB doesn't provide Durability since it is an +// in-memory database. +// +// MemDB provides a table abstraction to store objects (rows) with multiple +// indexes based on inserted values. The database makes use of immutable radix +// trees to provide transactions and MVCC. +// +// Objects inserted into MemDB are not copied. It is **extremely important** +// that objects are not modified in-place after they are inserted since they +// are stored directly in MemDB. It remains unsafe to modify inserted objects +// even after they've been deleted from MemDB since there may still be older +// snapshots of the DB being read from other goroutines. 
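The warning above describes the most common way to misuse this library. A sketch of the safe update pattern, copy first and then re-insert, using the `Person` table from the example:

```go
raw, err := txn.First("person", "id", "joe@aol.com")
if err != nil {
	panic(err)
}
if raw != nil {
	// Never mutate raw.(*Person) in place; older snapshots may still
	// be reading it. Copy, modify the copy, and insert the copy.
	updated := *raw.(*Person)
	updated.Age = 31
	if err := txn.Insert("person", &updated); err != nil {
		panic(err)
	}
}
```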
+type MemDB struct { + schema *DBSchema + root unsafe.Pointer // *iradix.Tree underneath + primary bool + + // There can only be a single writer at once + writer sync.Mutex +} + +// NewMemDB creates a new MemDB with the given schema. +func NewMemDB(schema *DBSchema) (*MemDB, error) { + // Validate the schema + if err := schema.Validate(); err != nil { + return nil, err + } + + // Create the MemDB + db := &MemDB{ + schema: schema, + root: unsafe.Pointer(iradix.New()), + primary: true, + } + if err := db.initialize(); err != nil { + return nil, err + } + + return db, nil +} + +// DBSchema returns schema in use for introspection. +// +// The method is intended for *read-only* debugging use cases, +// returned schema should *never be modified in-place*. +func (db *MemDB) DBSchema() *DBSchema { + return db.schema +} + +// getRoot is used to do an atomic load of the root pointer +func (db *MemDB) getRoot() *iradix.Tree { + root := (*iradix.Tree)(atomic.LoadPointer(&db.root)) + return root +} + +// Txn is used to start a new transaction in either read or write mode. +// There can only be a single concurrent writer, but any number of readers. +func (db *MemDB) Txn(write bool) *Txn { + if write { + db.writer.Lock() + } + txn := &Txn{ + db: db, + write: write, + rootTxn: db.getRoot().Txn(), + } + return txn +} + +// Snapshot is used to capture a point-in-time snapshot of the database that +// will not be affected by any write operations to the existing DB. +// +// If MemDB is storing reference-based values (pointers, maps, slices, etc.), +// the Snapshot will not deep copy those values. Therefore, it is still unsafe +// to modify any inserted values in either DB. +func (db *MemDB) Snapshot() *MemDB { + clone := &MemDB{ + schema: db.schema, + root: unsafe.Pointer(db.getRoot()), + primary: false, + } + return clone +} + +// initialize is used to setup the DB for use after creation. This should +// be called only once after allocating a MemDB. +func (db *MemDB) initialize() error { + root := db.getRoot() + for tName, tableSchema := range db.schema.Tables { + for iName := range tableSchema.Indexes { + index := iradix.New() + path := indexPath(tName, iName) + root, _, _ = root.Insert(path, index) + } + } + db.root = unsafe.Pointer(root) + return nil +} + +// indexPath returns the path from the root to the given table index +func indexPath(table, index string) []byte { + return []byte(table + "." + index) +} diff --git a/vendor/github.com/hashicorp/go-memdb/schema.go b/vendor/github.com/hashicorp/go-memdb/schema.go new file mode 100644 index 000000000..e6a9b526b --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/schema.go @@ -0,0 +1,114 @@ +package memdb + +import "fmt" + +// DBSchema is the schema to use for the full database with a MemDB instance. +// +// MemDB will require a valid schema. Schema validation can be tested using +// the Validate function. Calling this function is recommended in unit tests. +type DBSchema struct { + // Tables is the set of tables within this database. The key is the + // table name and must match the Name in TableSchema. + Tables map[string]*TableSchema +} + +// Validate validates the schema. 
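Since the comment above recommends exercising `Validate` in unit tests, a minimal test sketch (the `appSchema` helper is hypothetical; it would return your application's `*memdb.DBSchema`):

```go
func TestMemDBSchemaIsValid(t *testing.T) {
	if err := appSchema().Validate(); err != nil {
		t.Fatalf("invalid memdb schema: %v", err)
	}
}
```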
+func (s *DBSchema) Validate() error { + if s == nil { + return fmt.Errorf("schema is nil") + } + + if len(s.Tables) == 0 { + return fmt.Errorf("schema has no tables defined") + } + + for name, table := range s.Tables { + if name != table.Name { + return fmt.Errorf("table name mis-match for '%s'", name) + } + + if err := table.Validate(); err != nil { + return fmt.Errorf("table %q: %s", name, err) + } + } + + return nil +} + +// TableSchema is the schema for a single table. +type TableSchema struct { + // Name of the table. This must match the key in the Tables map in DBSchema. + Name string + + // Indexes is the set of indexes for querying this table. The key + // is a unique name for the index and must match the Name in the + // IndexSchema. + Indexes map[string]*IndexSchema +} + +// Validate is used to validate the table schema +func (s *TableSchema) Validate() error { + if s.Name == "" { + return fmt.Errorf("missing table name") + } + + if len(s.Indexes) == 0 { + return fmt.Errorf("missing table indexes for '%s'", s.Name) + } + + if _, ok := s.Indexes["id"]; !ok { + return fmt.Errorf("must have id index") + } + + if !s.Indexes["id"].Unique { + return fmt.Errorf("id index must be unique") + } + + if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok { + return fmt.Errorf("id index must be a SingleIndexer") + } + + for name, index := range s.Indexes { + if name != index.Name { + return fmt.Errorf("index name mis-match for '%s'", name) + } + + if err := index.Validate(); err != nil { + return fmt.Errorf("index %q: %s", name, err) + } + } + + return nil +} + +// IndexSchema is the schema for an index. An index defines how a table is +// queried. +type IndexSchema struct { + // Name of the index. This must be unique among a tables set of indexes. + // This must match the key in the map of Indexes for a TableSchema. + Name string + + // AllowMissing if true ignores this index if it doesn't produce a + // value. For example, an index that extracts a field that doesn't + // exist from a structure. + AllowMissing bool + + Unique bool + Indexer Indexer +} + +func (s *IndexSchema) Validate() error { + if s.Name == "" { + return fmt.Errorf("missing index name") + } + if s.Indexer == nil { + return fmt.Errorf("missing index function for '%s'", s.Name) + } + switch s.Indexer.(type) { + case SingleIndexer: + case MultiIndexer: + default: + return fmt.Errorf("indexer for '%s' must be a SingleIndexer or MultiIndexer", s.Name) + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-memdb/txn.go b/vendor/github.com/hashicorp/go-memdb/txn.go new file mode 100644 index 000000000..951c2a1d9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/txn.go @@ -0,0 +1,1021 @@ +package memdb + +import ( + "bytes" + "fmt" + "strings" + "sync/atomic" + "unsafe" + + iradix "github.com/hashicorp/go-immutable-radix" +) + +const ( + id = "id" +) + +var ( + // ErrNotFound is returned when the requested item is not found + ErrNotFound = fmt.Errorf("not found") +) + +// tableIndex is a tuple of (Table, Index) used for lookups +type tableIndex struct { + Table string + Index string +} + +// Txn is a transaction against a MemDB. +// This can be a read or write transaction. +type Txn struct { + db *MemDB + write bool + rootTxn *iradix.Txn + after []func() + + // changes is used to track the changes performed during the transaction. If + // it is nil at transaction start then changes are not tracked. 
+ changes Changes + + modified map[tableIndex]*iradix.Txn +} + +// TrackChanges enables change tracking for the transaction. If called at any +// point before commit, subsequent mutations will be recorded and can be +// retrieved using ChangeSet. Once this has been called on a transaction it +// can't be unset. As with other Txn methods it's not safe to call this from a +// different goroutine than the one making mutations or committing the +// transaction. +func (txn *Txn) TrackChanges() { + if txn.changes == nil { + txn.changes = make(Changes, 0, 1) + } +} + +// readableIndex returns a transaction usable for reading the given index in a +// table. If the transaction is a write transaction with modifications, a clone of the +// modified index will be returned. +func (txn *Txn) readableIndex(table, index string) *iradix.Txn { + // Look for existing transaction + if txn.write && txn.modified != nil { + key := tableIndex{table, index} + exist, ok := txn.modified[key] + if ok { + return exist.Clone() + } + } + + // Create a read transaction + path := indexPath(table, index) + raw, _ := txn.rootTxn.Get(path) + indexTxn := raw.(*iradix.Tree).Txn() + return indexTxn +} + +// writableIndex returns a transaction usable for modifying the +// given index in a table. +func (txn *Txn) writableIndex(table, index string) *iradix.Txn { + if txn.modified == nil { + txn.modified = make(map[tableIndex]*iradix.Txn) + } + + // Look for existing transaction + key := tableIndex{table, index} + exist, ok := txn.modified[key] + if ok { + return exist + } + + // Start a new transaction + path := indexPath(table, index) + raw, _ := txn.rootTxn.Get(path) + indexTxn := raw.(*iradix.Tree).Txn() + + // If we are the primary DB, enable mutation tracking. Snapshots should + // not notify, otherwise we will trigger watches on the primary DB when + // the writes will not be visible. + indexTxn.TrackMutate(txn.db.primary) + + // Keep this open for the duration of the txn + txn.modified[key] = indexTxn + return indexTxn +} + +// Abort is used to cancel this transaction. +// This is a noop for read transactions, +// already aborted or commited transactions. +func (txn *Txn) Abort() { + // Noop for a read transaction + if !txn.write { + return + } + + // Check if already aborted or committed + if txn.rootTxn == nil { + return + } + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + txn.changes = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() +} + +// Commit is used to finalize this transaction. +// This is a noop for read transactions, +// already aborted or committed transactions. +func (txn *Txn) Commit() { + // Noop for a read transaction + if !txn.write { + return + } + + // Check if already aborted or committed + if txn.rootTxn == nil { + return + } + + // Commit each sub-transaction scoped to (table, index) + for key, subTxn := range txn.modified { + path := indexPath(key.Table, key.Index) + final := subTxn.CommitOnly() + txn.rootTxn.Insert(path, final) + } + + // Update the root of the DB + newRoot := txn.rootTxn.CommitOnly() + atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot)) + + // Now issue all of the mutation updates (this is safe to call + // even if mutation tracking isn't enabled); we do this after + // the root pointer is swapped so that waking responders will + // see the new state. 
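Taken together, Abort and Commit support the standard write-transaction pattern: unconditionally defer Abort (a no-op once Commit has run) so the write lock is released on every error path. A sketch:

```go
func addPerson(db *memdb.MemDB, p *Person) error {
	txn := db.Txn(true)
	defer txn.Abort() // no-op after Commit; releases the write lock on error paths

	if err := txn.Insert("person", p); err != nil {
		return err
	}
	txn.Commit()
	return nil
}
```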
+ for _, subTxn := range txn.modified { + subTxn.Notify() + } + txn.rootTxn.Notify() + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() + + // Run the deferred functions, if any + for i := len(txn.after); i > 0; i-- { + fn := txn.after[i-1] + fn() + } +} + +// Insert is used to add or update an object into the given table. +// +// When updating an object, the obj provided should be a copy rather +// than a value updated in-place. Modifying values in-place that are already +// inserted into MemDB is not supported behavior. +func (txn *Txn) Insert(table string, obj interface{}) error { + if !txn.write { + return fmt.Errorf("cannot insert in read-only transaction") + } + + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return fmt.Errorf("invalid table '%s'", table) + } + + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(obj) + if err != nil { + return fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return fmt.Errorf("object missing primary index") + } + + // Lookup the object by ID first, to see if this is an update + idTxn := txn.writableIndex(table, id) + existing, update := idTxn.Get(idVal) + + // On an update, there is an existing object with the given + // primary ID. We do the update by deleting the current object + // and inserting the new object. + for name, indexSchema := range tableSchema.Indexes { + indexTxn := txn.writableIndex(table, name) + + // Determine the new index value + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(obj) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(obj) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if ok && !indexSchema.Unique { + for i := range vals { + vals[i] = append(vals[i], idVal...) + } + } + + // Handle the update by deleting from the index first + if update { + var ( + okExist bool + valsExist [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var valExist []byte + okExist, valExist, err = indexer.FromObject(existing) + valsExist = [][]byte{valExist} + case MultiIndexer: + okExist, valsExist, err = indexer.FromObject(existing) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + if okExist { + for i, valExist := range valsExist { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if !indexSchema.Unique { + valExist = append(valExist, idVal...) + } + + // If we are writing to the same index with the same value, + // we can avoid the delete as the insert will overwrite the + // value anyways. 
+ if i >= len(vals) || !bytes.Equal(valExist, vals[i]) { + indexTxn.Delete(valExist) + } + } + } + } + + // If there is no index value, either this is an error or an expected + // case and we can skip updating + if !ok { + if indexSchema.AllowMissing { + continue + } else { + return fmt.Errorf("missing value for index '%s'", name) + } + } + + // Update the value of the index + for _, val := range vals { + indexTxn.Insert(val, obj) + } + } + if txn.changes != nil { + txn.changes = append(txn.changes, Change{ + Table: table, + Before: existing, // might be nil on a create + After: obj, + primaryKey: idVal, + }) + } + return nil +} + +// Delete is used to delete a single object from the given table. +// This object must already exist in the table. +func (txn *Txn) Delete(table string, obj interface{}) error { + if !txn.write { + return fmt.Errorf("cannot delete in read-only transaction") + } + + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return fmt.Errorf("invalid table '%s'", table) + } + + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(obj) + if err != nil { + return fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return fmt.Errorf("object missing primary index") + } + + // Lookup the object by ID first, check if we should continue + idTxn := txn.writableIndex(table, id) + existing, ok := idTxn.Get(idVal) + if !ok { + return ErrNotFound + } + + // Remove the object from all the indexes + for name, indexSchema := range tableSchema.Indexes { + indexTxn := txn.writableIndex(table, name) + + // Handle the update by deleting from the index first + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(existing) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(existing) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + if ok { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + for _, val := range vals { + if !indexSchema.Unique { + val = append(val, idVal...) + } + indexTxn.Delete(val) + } + } + } + if txn.changes != nil { + txn.changes = append(txn.changes, Change{ + Table: table, + Before: existing, + After: nil, // Now nil indicates deletion + primaryKey: idVal, + }) + } + return nil +} + +// DeletePrefix is used to delete an entire subtree based on a prefix. +// The given index must be a prefix index, and will be used to perform a scan and enumerate the set of objects to delete. +// These will be removed from all other indexes, and then a special prefix operation will delete the objects from the given index in an efficient subtree delete operation. +// This is useful when you have a very large number of objects indexed by the given index, along with a much smaller number of entries in the other indexes for those objects. 
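Delete requires the object to already exist and reports `ErrNotFound` otherwise, so callers can treat a missing object as a benign condition where that is appropriate. A sketch:

```go
raw, err := txn.First("person", "id", "joe@aol.com")
if err != nil {
	panic(err)
}
if raw != nil {
	switch err := txn.Delete("person", raw); err {
	case nil, memdb.ErrNotFound: // already gone is fine here
	default:
		panic(err)
	}
}
```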
+func (txn *Txn) DeletePrefix(table string, prefix_index string, prefix string) (bool, error) { + if !txn.write { + return false, fmt.Errorf("cannot delete in read-only transaction") + } + + if !strings.HasSuffix(prefix_index, "_prefix") { + return false, fmt.Errorf("Index name for DeletePrefix must be a prefix index, Got %v ", prefix_index) + } + + deletePrefixIndex := strings.TrimSuffix(prefix_index, "_prefix") + + // Get an iterator over all of the keys with the given prefix. + entries, err := txn.Get(table, prefix_index, prefix) + if err != nil { + return false, fmt.Errorf("failed kvs lookup: %s", err) + } + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return false, fmt.Errorf("invalid table '%s'", table) + } + + foundAny := false + for entry := entries.Next(); entry != nil; entry = entries.Next() { + if !foundAny { + foundAny = true + } + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(entry) + if err != nil { + return false, fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return false, fmt.Errorf("object missing primary index") + } + if txn.changes != nil { + // Record the deletion + idTxn := txn.writableIndex(table, id) + existing, ok := idTxn.Get(idVal) + if ok { + txn.changes = append(txn.changes, Change{ + Table: table, + Before: existing, + After: nil, // Now nil indicates deletion + primaryKey: idVal, + }) + } + } + // Remove the object from all the indexes except the given prefix index + for name, indexSchema := range tableSchema.Indexes { + if name == deletePrefixIndex { + continue + } + indexTxn := txn.writableIndex(table, name) + + // Handle the update by deleting from the index first + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(entry) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(entry) + } + if err != nil { + return false, fmt.Errorf("failed to build index '%s': %v", name, err) + } + + if ok { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + for _, val := range vals { + if !indexSchema.Unique { + val = append(val, idVal...) + } + indexTxn.Delete(val) + } + } + } + + } + if foundAny { + indexTxn := txn.writableIndex(table, deletePrefixIndex) + ok = indexTxn.DeletePrefix([]byte(prefix)) + if !ok { + panic(fmt.Errorf("prefix %v matched some entries but DeletePrefix did not delete any ", prefix)) + } + return true, nil + } + return false, nil +} + +// DeleteAll is used to delete all the objects in a given table +// matching the constraints on the index +func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) { + if !txn.write { + return 0, fmt.Errorf("cannot delete in read-only transaction") + } + + // Get all the objects + iter, err := txn.Get(table, index, args...) 
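A usage sketch for DeletePrefix; note the index argument must carry the `_prefix` suffix, and the `id` index on the example's `Person` table stores the Email field:

```go
// Remove every person whose Email starts with "joe".
any, err := txn.DeletePrefix("person", "id_prefix", "joe")
if err != nil {
	panic(err)
}
fmt.Println("deleted at least one row:", any)
```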
+ if err != nil { + return 0, err + } + + // Put them into a slice so there are no safety concerns while actually + // performing the deletes + var objs []interface{} + for { + obj := iter.Next() + if obj == nil { + break + } + + objs = append(objs, obj) + } + + // Do the deletes + num := 0 + for _, obj := range objs { + if err := txn.Delete(table, obj); err != nil { + return num, err + } + num++ + } + return num, nil +} + +// FirstWatch is used to return the first matching object for +// the given constraints on the index along with the watch channel. +// +// Note that all values read in the transaction form a consistent snapshot +// from the time when the transaction was created. +// +// The watch channel is closed when a subsequent write transaction +// has updated the result of the query. Since each read transaction +// operates on an isolated snapshot, a new read transaction must be +// started to observe the changes that have been made. +// +// If the value of index ends with "_prefix", FirstWatch will perform a prefix +// match instead of full match on the index. The registered indexer must implement +// PrefixIndexer, otherwise an error is returned. +func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) { + // Get the index value + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, nil, err + } + + // Get the index itself + indexTxn := txn.readableIndex(table, indexSchema.Name) + + // Do an exact lookup + if indexSchema.Unique && val != nil && indexSchema.Name == index { + watch, obj, ok := indexTxn.GetWatch(val) + if !ok { + return watch, nil, nil + } + return watch, obj, nil + } + + // Handle non-unique index by using an iterator and getting the first value + iter := indexTxn.Root().Iterator() + watch := iter.SeekPrefixWatch(val) + _, value, _ := iter.Next() + return watch, value, nil +} + +// LastWatch is used to return the last matching object for +// the given constraints on the index along with the watch channel. +// +// Note that all values read in the transaction form a consistent snapshot +// from the time when the transaction was created. +// +// The watch channel is closed when a subsequent write transaction +// has updated the result of the query. Since each read transaction +// operates on an isolated snapshot, a new read transaction must be +// started to observe the changes that have been made. +// +// If the value of index ends with "_prefix", LastWatch will perform a prefix +// match instead of full match on the index. The registered indexer must implement +// PrefixIndexer, otherwise an error is returned. +func (txn *Txn) LastWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) { + // Get the index value + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, nil, err + } + + // Get the index itself + indexTxn := txn.readableIndex(table, indexSchema.Name) + + // Do an exact lookup + if indexSchema.Unique && val != nil && indexSchema.Name == index { + watch, obj, ok := indexTxn.GetWatch(val) + if !ok { + return watch, nil, nil + } + return watch, obj, nil + } + + // Handle non-unique index by using an iterator and getting the last value + iter := indexTxn.Root().ReverseIterator() + watch := iter.SeekPrefixWatch(val) + _, value, _ := iter.Previous() + return watch, value, nil +} + +// First is used to return the first matching object for +// the given constraints on the index. 
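A sketch of watching a single lookup with FirstWatch; the channel closes when a later write transaction changes the result, at which point the caller opens a fresh read transaction and re-queries:

```go
txn := db.Txn(false)
defer txn.Abort()

watchCh, raw, err := txn.FirstWatch("person", "id", "joe@aol.com")
if err != nil {
	panic(err)
}
_ = raw // the value as of this transaction's snapshot

<-watchCh // blocks until a subsequent write invalidates the result
// ...open a new read transaction and run the query again...
```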
+// +// Note that all values read in the transaction form a consistent snapshot +// from the time when the transaction was created. +func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) { + _, val, err := txn.FirstWatch(table, index, args...) + return val, err +} + +// Last is used to return the last matching object for +// the given constraints on the index. +// +// Note that all values read in the transaction form a consistent snapshot +// from the time when the transaction was created. +func (txn *Txn) Last(table, index string, args ...interface{}) (interface{}, error) { + _, val, err := txn.LastWatch(table, index, args...) + return val, err +} + +// LongestPrefix is used to fetch the longest prefix match for the given +// constraints on the index. Note that this will not work with the memdb +// StringFieldIndex because it adds null terminators which prevent the +// algorithm from correctly finding a match (it will get to right before the +// null and fail to find a leaf node). This should only be used where the prefix +// given is capable of matching indexed entries directly, which typically only +// applies to a custom indexer. See the unit test for an example. +// +// Note that all values read in the transaction form a consistent snapshot +// from the time when the transaction was created. +func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) { + // Enforce that this only works on prefix indexes. + if !strings.HasSuffix(index, "_prefix") { + return nil, fmt.Errorf("must use '%s_prefix' on index", index) + } + + // Get the index value. + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, err + } + + // This algorithm only makes sense against a unique index, otherwise the + // index keys will have the IDs appended to them. + if !indexSchema.Unique { + return nil, fmt.Errorf("index '%s' is not unique", index) + } + + // Find the longest prefix match with the given index. + indexTxn := txn.readableIndex(table, indexSchema.Name) + if _, value, ok := indexTxn.Root().LongestPrefix(val); ok { + return value, nil + } + return nil, nil +} + +// getIndexValue is used to get the IndexSchema and the value +// used to scan the index given the parameters. This handles prefix based +// scans when the index has the "_prefix" suffix. The index must support +// prefix iteration. +func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexSchema, []byte, error) { + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return nil, nil, fmt.Errorf("invalid table '%s'", table) + } + + // Check for a prefix scan + prefixScan := false + if strings.HasSuffix(index, "_prefix") { + index = strings.TrimSuffix(index, "_prefix") + prefixScan = true + } + + // Get the index schema + indexSchema, ok := tableSchema.Indexes[index] + if !ok { + return nil, nil, fmt.Errorf("invalid index '%s'", index) + } + + // Hot-path for when there are no arguments + if len(args) == 0 { + return indexSchema, nil, nil + } + + // Special case the prefix scanning + if prefixScan { + prefixIndexer, ok := indexSchema.Indexer.(PrefixIndexer) + if !ok { + return indexSchema, nil, + fmt.Errorf("index '%s' does not support prefix scanning", index) + } + + val, err := prefixIndexer.PrefixFromArgs(args...) 
+ if err != nil { + return indexSchema, nil, fmt.Errorf("index error: %v", err) + } + return indexSchema, val, err + } + + // Get the exact match index + val, err := indexSchema.Indexer.FromArgs(args...) + if err != nil { + return indexSchema, nil, fmt.Errorf("index error: %v", err) + } + return indexSchema, val, err +} + +// ResultIterator is used to iterate over a list of results from a query on a table. +// +// When a ResultIterator is created from a write transaction, the results from +// Next will reflect a snapshot of the table at the time the ResultIterator is +// created. +// This means that calling Insert or Delete on a transaction while iterating is +// allowed, but the changes made by Insert or Delete will not be observed in the +// results returned from subsequent calls to Next. For example if an item is deleted +// from the index used by the iterator it will still be returned by Next. If an +// item is inserted into the index used by the iterator, it will not be returned +// by Next. However, an iterator created after a call to Insert or Delete will +// reflect the modifications. +// +// When a ResultIterator is created from a write transaction, and there are already +// modifications to the index used by the iterator, the modification cache of the +// index will be invalidated. This may result in some additional allocations if +// the same node in the index is modified again. +type ResultIterator interface { + WatchCh() <-chan struct{} + // Next returns the next result from the iterator. If there are no more results + // nil is returned. + Next() interface{} +} + +// Get is used to construct a ResultIterator over all the rows that match the +// given constraints of an index. The index values must match exactly (this +// is not a range-based or prefix-based lookup) by default. +// +// Prefix lookups: if the named index implements PrefixIndexer, you may perform +// prefix-based lookups by appending "_prefix" to the index name. In this +// scenario, the index values given in args are treated as prefix lookups. For +// example, a StringFieldIndex will match any string with the given value +// as a prefix: "mem" matches "memdb". +// +// See the documentation for ResultIterator to understand the behaviour of the +// returned ResultIterator. +func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) { + indexIter, val, err := txn.getIndexIterator(table, index, args...) + if err != nil { + return nil, err + } + + // Seek the iterator to the appropriate sub-set + watchCh := indexIter.SeekPrefixWatch(val) + + // Create an iterator + iter := &radixIterator{ + iter: indexIter, + watchCh: watchCh, + } + return iter, nil +} + +// GetReverse is used to construct a Reverse ResultIterator over all the +// rows that match the given constraints of an index. +// The returned ResultIterator's Next() will return the next Previous value. +// +// See the documentation on Get for details on arguments. +// +// See the documentation for ResultIterator to understand the behaviour of the +// returned ResultIterator. +func (txn *Txn) GetReverse(table, index string, args ...interface{}) (ResultIterator, error) { + indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...) 
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	watchCh := indexIter.SeekPrefixWatch(val)
+
+	// Create an iterator
+	iter := &radixReverseIterator{
+		iter:    indexIter,
+		watchCh: watchCh,
+	}
+	return iter, nil
+}
+
+// LowerBound is used to construct a ResultIterator over the range of rows
+// that have an index value greater than or equal to the provided args.
+// Calling this then iterating until the rows are larger than required allows
+// range scans within an index. It is not possible to watch the resulting
+// iterator since the radix tree doesn't efficiently allow watching on lower
+// bound changes. The WatchCh returned will be nil and so will block forever.
+//
+// If the value of index ends with "_prefix", LowerBound will perform a prefix match instead of
+// a full match on the index. The registered index must implement PrefixIndexer,
+// otherwise an error is returned.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) LowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIterator(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	indexIter.SeekLowerBound(val)
+
+	// Create an iterator
+	iter := &radixIterator{
+		iter: indexIter,
+	}
+	return iter, nil
+}
+
+// ReverseLowerBound is used to construct a Reverse ResultIterator over the
+// range of rows that have an index value less than or equal to the
+// provided args. Calling this then iterating until the rows are lower than
+// required allows range scans within an index. It is not possible to watch the
+// resulting iterator since the radix tree doesn't efficiently allow watching
+// on lower bound changes. The WatchCh returned will be nil and so will block
+// forever.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) ReverseLowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	indexIter.SeekReverseLowerBound(val)
+
+	// Create an iterator
+	iter := &radixReverseIterator{
+		iter: indexIter,
+	}
+	return iter, nil
+}
+
+// objectID is a tuple of table name and the raw internal id byte slice
+// converted to a string. It's only converted to a string to make it comparable
+// so this struct can be used as a map index.
+type objectID struct {
+	Table    string
+	IndexVal string
+}
+
+// mutInfo stores metadata about mutations to allow collapsing multiple
+// mutations to the same object into one.
+type mutInfo struct {
+	firstBefore interface{}
+	lastIdx     int
+}
+
+// Changes returns the set of object changes that have been made in the
+// transaction so far. If change tracking is not enabled it will always return
+// nil. It can be called before or after Commit. If it is before Commit it will
+// return all changes made so far which may not be the same as the final
+// Changes. After abort it will always return nil. As with other Txn methods
+// it's not safe to call this from a different goroutine than the one making
+// mutations or committing the transaction.
+// Changes returns the set of object changes that have been made in the
+// transaction so far. If change tracking is not enabled it will always return
+// nil. It can be called before or after Commit. If it is before Commit it will
+// return all changes made so far which may not be the same as the final
+// Changes. After abort it will always return nil. As with other Txn methods
+// it's not safe to call this from a different goroutine than the one making
+// mutations or committing the transaction. Mutations will appear in the order
+// they were performed in the transaction but multiple operations to the same
+// object will be collapsed so only the effective overall change to that object
+// is present. If transaction operations are dependent (e.g. copy object X to Y
+// then delete X) this might mean the set of mutations is incomplete to verify
+// history, but it is complete in that the net effect is preserved (Y got a new
+// value, X got removed).
+func (txn *Txn) Changes() Changes {
+	if txn.changes == nil {
+		return nil
+	}
+
+	// De-duplicate mutations by key so all take effect at the point of the last
+	// write but we keep the mutations in order.
+	dups := make(map[objectID]mutInfo)
+	for i, m := range txn.changes {
+		oid := objectID{
+			Table:    m.Table,
+			IndexVal: string(m.primaryKey),
+		}
+		// Store the latest mutation index for each key value
+		mi, ok := dups[oid]
+		if !ok {
+			// First entry for key, store the before value
+			mi.firstBefore = m.Before
+		}
+		mi.lastIdx = i
+		dups[oid] = mi
+	}
+	if len(dups) == len(txn.changes) {
+		// No duplicates found, fast path return it as is
+		return txn.changes
+	}
+
+	// Need to remove the duplicates
+	cs := make(Changes, 0, len(dups))
+	for i, m := range txn.changes {
+		oid := objectID{
+			Table:    m.Table,
+			IndexVal: string(m.primaryKey),
+		}
+		mi := dups[oid]
+		if mi.lastIdx == i {
+			// This was the latest value for this key, copy it with the before value in
+			// case it's different. Note that m is not a pointer so we are not
+			// modifying the txn.changeSet here - it's already a copy.
+			m.Before = mi.firstBefore
+
+			// Edge case - if the object was inserted and then eventually deleted in
+			// the same transaction, then the net effect on that key is a no-op. Don't
+			// emit a mutation with nil for before and after as it's meaningless and
+			// might violate expectations and cause a panic in code that assumes at
+			// least one must be set.
+			if m.Before == nil && m.After == nil {
+				continue
+			}
+			cs = append(cs, m)
+		}
+	}
+	// Store the de-duped version in case this is called again
+	txn.changes = cs
+	return cs
+}
+
+func (txn *Txn) getIndexIterator(table, index string, args ...interface{}) (*iradix.Iterator, []byte, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.Iterator()
+	return indexIter, val, nil
+}
+
+func (txn *Txn) getIndexIteratorReverse(table, index string, args ...interface{}) (*iradix.ReverseIterator, []byte, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.ReverseIterator()
+	return indexIter, val, nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions.
+func (txn *Txn) Defer(fn func()) {
+	txn.after = append(txn.after, fn)
+}
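The collapsing behaviour of Changes() is worth a concrete illustration. A hedged sketch, assuming the library's `TrackChanges()` API (defined in this package's changes.go, not shown in this hunk) and the same hypothetical `person` table as above:

```go
package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

// Person is the same hypothetical record type used in the earlier sketches.
type Person struct {
	ID   string
	Name string
}

// showCollapsedChanges inserts the same key twice; Changes() reports a
// single net mutation for it rather than two.
func showCollapsedChanges(db *memdb.MemDB) error {
	txn := db.Txn(true)
	txn.TrackChanges() // enable change tracking for this write transaction

	if err := txn.Insert("person", &Person{ID: "1", Name: "first"}); err != nil {
		return err
	}
	if err := txn.Insert("person", &Person{ID: "1", Name: "second"}); err != nil {
		return err
	}

	// One Change for key "1": Before is nil (the object did not exist at
	// the start of the transaction) and After is the final inserted value.
	for _, ch := range txn.Changes() {
		fmt.Printf("table=%s before=%v after=%v\n", ch.Table, ch.Before, ch.After)
	}

	txn.Commit()
	return nil
}
```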
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not
+// materializing the entire view.
+type radixIterator struct {
+	iter    *iradix.Iterator
+	watchCh <-chan struct{}
+}
+
+func (r *radixIterator) WatchCh() <-chan struct{} {
+	return r.watchCh
+}
+
+func (r *radixIterator) Next() interface{} {
+	_, value, ok := r.iter.Next()
+	if !ok {
+		return nil
+	}
+	return value
+}
+
+type radixReverseIterator struct {
+	iter    *iradix.ReverseIterator
+	watchCh <-chan struct{}
+}
+
+func (r *radixReverseIterator) Next() interface{} {
+	_, value, ok := r.iter.Previous()
+	if !ok {
+		return nil
+	}
+	return value
+}
+
+func (r *radixReverseIterator) WatchCh() <-chan struct{} {
+	return r.watchCh
+}
+
+// Snapshot creates a snapshot of the current state of the transaction.
+// Returns a new read-only transaction or nil if the transaction is already
+// aborted or committed.
+func (txn *Txn) Snapshot() *Txn {
+	if txn.rootTxn == nil {
+		return nil
+	}
+
+	snapshot := &Txn{
+		db:      txn.db,
+		rootTxn: txn.rootTxn.Clone(),
+	}
+
+	// Commit sub-transactions into the snapshot
+	for key, subTxn := range txn.modified {
+		path := indexPath(key.Table, key.Index)
+		final := subTxn.CommitOnly()
+		snapshot.rootTxn.Insert(path, final)
+	}
+
+	return snapshot
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch.go b/vendor/github.com/hashicorp/go-memdb/watch.go
new file mode 100644
index 000000000..13a4da145
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/watch.go
@@ -0,0 +1,152 @@
+package memdb
+
+import (
+	"context"
+	"time"
+)
+
+// WatchSet is a collection of watch channels. The zero value is not usable.
+// Use NewWatchSet to create a WatchSet.
+type WatchSet map[<-chan struct{}]struct{}
+
+// NewWatchSet constructs a new watch set.
+func NewWatchSet() WatchSet {
+	return make(map[<-chan struct{}]struct{})
+}
+
+// Add appends watchCh to the WatchSet; it is a no-op on a nil WatchSet.
+func (w WatchSet) Add(watchCh <-chan struct{}) {
+	if w == nil {
+		return
+	}
+
+	if _, ok := w[watchCh]; !ok {
+		w[watchCh] = struct{}{}
+	}
+}
+
+// AddWithLimit appends watchCh to the WatchSet if the set is non-nil and the
+// given softLimit hasn't been exceeded. Otherwise, it will watch the given
+// alternate channel. It's expected that the altCh will be the same on many
+// calls to this function, so you will exceed the soft limit a little bit if
+// you hit this, but not by much.
+//
+// This is useful if you want to track individual items up to some limit, after
+// which you watch a higher-level channel (usually a channel from the start of
+// an iterator higher up in the radix tree) that will watch a superset of items.
+func (w WatchSet) AddWithLimit(softLimit int, watchCh <-chan struct{}, altCh <-chan struct{}) {
+	// This is safe for a nil WatchSet so we don't need to check that here.
+	if len(w) < softLimit {
+		w.Add(watchCh)
+	} else {
+		w.Add(altCh)
+	}
+}
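A short sketch of how these pieces fit together: an iterator's WatchCh feeds a WatchSet, which then blocks until the watched result set changes. This assumes the same hypothetical `person` table as in the earlier sketches; it is not part of the patch.

```go
package main

import (
	"context"
	"time"

	memdb "github.com/hashicorp/go-memdb"
)

// waitForPeople blocks until the result set of the query changes or the
// timeout elapses.
func waitForPeople(db *memdb.MemDB, timeout time.Duration) error {
	txn := db.Txn(false)
	defer txn.Abort()

	it, err := txn.Get("person", "id")
	if err != nil {
		return err
	}

	ws := memdb.NewWatchSet()
	ws.Add(it.WatchCh()) // watch the whole result set of the iterator

	// ... consume the current results from it here if needed ...

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	// Returns nil when a watched channel fires, or ctx.Err() on timeout.
	return ws.WatchCtx(ctx)
}
```

For callers that already sit inside a select loop, `WatchCh(ctx)` (defined below) wraps the same wait in a channel instead of a blocking call.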
+// Watch blocks until one of the channels in the watch set is closed, or
+// timeoutCh sends a value.
+// Returns true if timeoutCh is what caused Watch to unblock.
+func (w WatchSet) Watch(timeoutCh <-chan time.Time) bool {
+	if w == nil {
+		return false
+	}
+
+	// Create a context that gets cancelled when the timeout is triggered
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	go func() {
+		select {
+		case <-timeoutCh:
+			cancel()
+		case <-ctx.Done():
+		}
+	}()
+
+	return w.WatchCtx(ctx) == context.Canceled
+}
+
+// WatchCtx blocks until one of the channels in the watch set is closed, or
+// ctx is done (cancelled or exceeds the deadline). WatchCtx returns an error
+// if the ctx causes it to unblock, otherwise returns nil.
+//
+// WatchCtx should be preferred over Watch.
+func (w WatchSet) WatchCtx(ctx context.Context) error {
+	if w == nil {
+		return nil
+	}
+
+	if n := len(w); n <= aFew {
+		idx := 0
+		chunk := make([]<-chan struct{}, aFew)
+		for watchCh := range w {
+			chunk[idx] = watchCh
+			idx++
+		}
+		return watchFew(ctx, chunk)
+	}
+
+	return w.watchMany(ctx)
+}
+
+// watchMany is used if there are many watchers.
+func (w WatchSet) watchMany(ctx context.Context) error {
+	// Cancel all watcher goroutines when we return.
+	watcherCtx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Set up a goroutine for each watcher.
+	triggerCh := make(chan struct{}, 1)
+	watcher := func(chunk []<-chan struct{}) {
+		if err := watchFew(watcherCtx, chunk); err == nil {
+			select {
+			case triggerCh <- struct{}{}:
+			default:
+			}
+		}
+	}
+
+	// Apportion the watch channels into chunks we can feed into the
+	// watchFew helper.
+	idx := 0
+	chunk := make([]<-chan struct{}, aFew)
+	for watchCh := range w {
+		subIdx := idx % aFew
+		chunk[subIdx] = watchCh
+		idx++
+
+		// Fire off this chunk and start a fresh one.
+		if idx%aFew == 0 {
+			go watcher(chunk)
+			chunk = make([]<-chan struct{}, aFew)
+		}
+	}
+
+	// Make sure to watch any residual channels in the last chunk.
+	if idx%aFew != 0 {
+		go watcher(chunk)
+	}
+
+	// Wait for a channel to trigger or timeout.
+	select {
+	case <-triggerCh:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// WatchCh returns a channel that is used to wait for any channel of the watch
+// set to trigger or for the context to be cancelled. WatchCh creates a new
+// goroutine each call, so callers may need to cache the returned channel to
+// avoid creating extra goroutines.
+func (w WatchSet) WatchCh(ctx context.Context) <-chan error {
+	// Create the outgoing channel
+	triggerCh := make(chan error, 1)
+
+	// Create a goroutine to collect the error from WatchCtx
+	go func() {
+		triggerCh <- w.WatchCtx(ctx)
+	}()
+
+	return triggerCh
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch_few.go b/vendor/github.com/hashicorp/go-memdb/watch_few.go
new file mode 100644
index 000000000..b211eeea2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/watch_few.go
@@ -0,0 +1,117 @@
+package memdb
+
+//go:generate sh -c "go run watch-gen/main.go >watch_few.go"
+
+import (
+	"context"
+)
+
+// aFew gives how many watchers this function is wired to support. You must
+// always pass a full slice of this length, but unused channels can be nil.
+const aFew = 32
+
+// watchFew is used if there are only a few watchers as a performance
+// optimization.
+func watchFew(ctx context.Context, ch []<-chan struct{}) error { + select { + + case <-ch[0]: + return nil + + case <-ch[1]: + return nil + + case <-ch[2]: + return nil + + case <-ch[3]: + return nil + + case <-ch[4]: + return nil + + case <-ch[5]: + return nil + + case <-ch[6]: + return nil + + case <-ch[7]: + return nil + + case <-ch[8]: + return nil + + case <-ch[9]: + return nil + + case <-ch[10]: + return nil + + case <-ch[11]: + return nil + + case <-ch[12]: + return nil + + case <-ch[13]: + return nil + + case <-ch[14]: + return nil + + case <-ch[15]: + return nil + + case <-ch[16]: + return nil + + case <-ch[17]: + return nil + + case <-ch[18]: + return nil + + case <-ch[19]: + return nil + + case <-ch[20]: + return nil + + case <-ch[21]: + return nil + + case <-ch[22]: + return nil + + case <-ch[23]: + return nil + + case <-ch[24]: + return nil + + case <-ch[25]: + return nil + + case <-ch[26]: + return nil + + case <-ch[27]: + return nil + + case <-ch[28]: + return nil + + case <-ch[29]: + return nil + + case <-ch[30]: + return nil + + case <-ch[31]: + return nil + + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
new file mode 100644
index 000000000..a86c8539e
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -0,0 +1,177 @@
+package simplelru
+
+import (
+	"container/list"
+	"errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread safe fixed size LRU cache
+type LRU struct {
+	size      int
+	evictList *list.List
+	items     map[interface{}]*list.Element
+	onEvict   EvictCallback
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+	key   interface{}
+	value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+	if size <= 0 {
+		return nil, errors.New("Must provide a positive size")
+	}
+	c := &LRU{
+		size:      size,
+		evictList: list.New(),
+		items:     make(map[interface{}]*list.Element),
+		onEvict:   onEvict,
+	}
+	return c, nil
+}
+
+// Purge is used to completely clear the cache.
+func (c *LRU) Purge() {
+	for k, v := range c.items {
+		if c.onEvict != nil {
+			c.onEvict(k, v.Value.(*entry).value)
+		}
+		delete(c.items, k)
+	}
+	c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *LRU) Add(key, value interface{}) (evicted bool) {
+	// Check for existing item
+	if ent, ok := c.items[key]; ok {
+		c.evictList.MoveToFront(ent)
+		ent.Value.(*entry).value = value
+		return false
+	}
+
+	// Add new item
+	ent := &entry{key, value}
+	entry := c.evictList.PushFront(ent)
+	c.items[key] = entry
+
+	evict := c.evictList.Len() > c.size
+	// Verify size not exceeded
+	if evict {
+		c.removeOldest()
+	}
+	return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+	if ent, ok := c.items[key]; ok {
+		c.evictList.MoveToFront(ent)
+		if ent.Value.(*entry) == nil {
+			return nil, false
+		}
+		return ent.Value.(*entry).value, true
+	}
+	return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+	_, ok = c.items[key]
+	return ok
+}
+
+// Peek returns the key's value (or nil if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+	var ent *list.Element
+	if ent, ok = c.items[key]; ok {
+		return ent.Value.(*entry).value, true
+	}
+	return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning whether the
+// key was contained.
+func (c *LRU) Remove(key interface{}) (present bool) {
+	if ent, ok := c.items[key]; ok {
+		c.removeElement(ent)
+		return true
+	}
+	return false
+}
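The simplelru API added above is small enough to show end to end. A minimal usage sketch (not part of the patch) using only the functions defined in this file; note the comment above that this LRU is not thread-safe:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// The callback fires whenever an entry is evicted (by Add overflow,
	// Resize, RemoveOldest, or Purge).
	onEvict := func(key interface{}, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	l, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}

	l.Add("a", 1)
	l.Add("b", 2)
	l.Add("c", 3) // capacity exceeded: "a" (the oldest) is evicted

	if _, ok := l.Get("a"); !ok {
		fmt.Println(`"a" is gone; "b" and "c" remain`)
	}
}
```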
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
+	ent := c.evictList.Back()
+	if ent != nil {
+		c.removeElement(ent)
+		kv := ent.Value.(*entry)
+		return kv.key, kv.value, true
+	}
+	return nil, nil, false
+}
+
+// GetOldest returns the oldest entry
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
+	ent := c.evictList.Back()
+	if ent != nil {
+		kv := ent.Value.(*entry)
+		return kv.key, kv.value, true
+	}
+	return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+	keys := make([]interface{}, len(c.items))
+	i := 0
+	for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+		keys[i] = ent.Value.(*entry).key
+		i++
+	}
+	return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+	return c.evictList.Len()
+}
+
+// Resize changes the cache size.
+func (c *LRU) Resize(size int) (evicted int) {
+	diff := c.Len() - size
+	if diff < 0 {
+		diff = 0
+	}
+	for i := 0; i < diff; i++ {
+		c.removeOldest()
+	}
+	c.size = size
+	return diff
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+	ent := c.evictList.Back()
+	if ent != nil {
+		c.removeElement(ent)
+	}
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU) removeElement(e *list.Element) {
+	c.evictList.Remove(e)
+	kv := e.Value.(*entry)
+	delete(c.items, kv.key)
+	if c.onEvict != nil {
+		c.onEvict(kv.key, kv.value)
+	}
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
new file mode 100644
index 000000000..92d70934d
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -0,0 +1,39 @@
+package simplelru
+
+// LRUCache is the interface for simple LRU cache.
+type LRUCache interface {
+	// Adds a value to the cache, returns true if an eviction occurred and
+	// updates the "recently used"-ness of the key.
+	Add(key, value interface{}) bool
+
+	// Returns key's value from the cache and
+	// updates the "recently used"-ness of the key. #value, isFound
+	Get(key interface{}) (value interface{}, ok bool)
+
+	// Checks if a key exists in cache without updating the recent-ness.
+	Contains(key interface{}) (ok bool)
+
+	// Returns key's value without updating the "recently used"-ness of the key.
+	Peek(key interface{}) (value interface{}, ok bool)
+
+	// Removes a key from the cache.
+	Remove(key interface{}) bool
+
+	// Removes the oldest entry from cache.
+	RemoveOldest() (interface{}, interface{}, bool)
+
+	// Returns the oldest entry from the cache. #key, value, isFound
+	GetOldest() (interface{}, interface{}, bool)
+
+	// Returns a slice of the keys in the cache, from oldest to newest.
+	Keys() []interface{}
+
+	// Returns the number of items in the cache.
+	Len() int
+
+	// Clears all cache entries.
+	Purge()
+
+	// Resizes cache, returning number evicted
+	Resize(int) int
+}
diff --git a/vendor/github.com/josharian/intern/README.md b/vendor/github.com/josharian/intern/README.md
deleted file mode 100644
index ffc44b219..000000000
--- a/vendor/github.com/josharian/intern/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-Docs: https://godoc.org/github.com/josharian/intern
-
-See also [Go issue 5160](https://golang.org/issue/5160).
- -License: MIT diff --git a/vendor/github.com/josharian/intern/intern.go b/vendor/github.com/josharian/intern/intern.go deleted file mode 100644 index 7acb1fe90..000000000 --- a/vendor/github.com/josharian/intern/intern.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package intern interns strings. -// Interning is best effort only. -// Interned strings may be removed automatically -// at any time without notification. -// All functions may be called concurrently -// with themselves and each other. -package intern - -import "sync" - -var ( - pool sync.Pool = sync.Pool{ - New: func() interface{} { - return make(map[string]string) - }, - } -) - -// String returns s, interned. -func String(s string) string { - m := pool.Get().(map[string]string) - c, ok := m[s] - if ok { - pool.Put(m) - return c - } - m[s] = s - pool.Put(m) - return s -} - -// Bytes returns b converted to a string, interned. -func Bytes(b []byte) string { - m := pool.Get().(map[string]string) - c, ok := m[string(b)] - if ok { - pool.Put(m) - return c - } - s := string(b) - m[s] = s - pool.Put(m) - return s -} diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 244ee19c4..af2ef6395 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -27,6 +27,16 @@ Use the links above for more information on each. # changelog +* Oct 20, 2025 - [1.18.1](https://github.com/klauspost/compress/releases/tag/v1.18.1) + * zstd: Add simple zstd EncodeTo/DecodeTo functions https://github.com/klauspost/compress/pull/1079 + * zstd: Fix incorrect buffer size in dictionary encodes https://github.com/klauspost/compress/pull/1059 + * s2: check for cap, not len of buffer in EncodeBetter/Best by @vdarulis in https://github.com/klauspost/compress/pull/1080 + * zlib: Avoiding extra allocation in zlib.reader.Reset by @travelpolicy in https://github.com/klauspost/compress/pull/1086 + * gzhttp: remove redundant err check in zstdReader by @ryanfowler in https://github.com/klauspost/compress/pull/1090 + * flate: Faster load+store https://github.com/klauspost/compress/pull/1104 + * flate: Simplify matchlen https://github.com/klauspost/compress/pull/1101 + * flate: Use exact sizes for huffman tables https://github.com/klauspost/compress/pull/1103 + * Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 @@ -36,6 +46,9 @@ Use the links above for more information on each. * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 +
+ See changes to v1.17.x + * Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 @@ -102,7 +115,8 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 - + +
See changes to v1.16.x @@ -669,3 +683,4 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv # license This code is licensed under the same conditions as the original Go code. See LICENSE file. + diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 4e92f5998..57d17eeab 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -421,7 +421,9 @@ func (d *compressor) deflateLazy() { d.h = newHuffmanEncoder(maxFlateBlockTokens) } var tmp [256]uint16 - for _, v := range d.window[s.index:d.windowEnd] { + toIndex := d.window[s.index:d.windowEnd] + toIndex = toIndex[:min(len(toIndex), maxFlateBlockTokens)] + for _, v := range toIndex { tmp[v]++ } d.h.generate(tmp[:], 15) diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 03a179697..7151140cc 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -646,7 +646,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.lastHeader = 0 } - numLiterals, numOffsets := w.indexTokens(tokens, fillReuse && !sync) + numLiterals, numOffsets := w.indexTokens(tokens, true) extraBits := 0 ssize, storable := w.storedSize(input) @@ -781,7 +781,7 @@ func (w *huffmanBitWriter) fillTokens() { // literalFreq and offsetFreq, and generates literalEncoding // and offsetEncoding. // The number of literal and offset tokens is returned. -func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { +func (w *huffmanBitWriter) indexTokens(t *tokens, alwaysEOB bool) (numLiterals, numOffsets int) { //copy(w.literalFreq[:], t.litHist[:]) *(*[256]uint16)(w.literalFreq[:]) = t.litHist //copy(w.literalFreq[256:], t.extraHist[:]) @@ -791,9 +791,10 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, num if t.n == 0 { return } - if filled { - return maxNumLit, maxNumDist + if alwaysEOB { + w.literalFreq[endBlockMarker] = 1 } + // get the number of literals numLiterals = len(w.literalFreq) for w.literalFreq[numLiterals-1] == 0 { diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 90b74f7ac..455ed3e2b 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -61,13 +61,19 @@ var bitWriterPool = sync.Pool{ }, } +// tokensPool contains tokens struct objects that can be reused +var tokensPool = sync.Pool{ + New: func() any { + return &tokens{} + }, +} + // StatelessDeflate allows compressing directly to a Writer without retaining state. // When returning everything will be flushed. // Up to 8KB of an optional dictionary can be given which is presumed to precede the block. // Longer dictionaries will be truncated and will still produce valid output. // Sending nil dictionary is perfectly fine. func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { - var dst tokens bw := bitWriterPool.Get().(*huffmanBitWriter) bw.reset(out) defer func() { @@ -91,6 +97,12 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
var inDict []byte + dst := tokensPool.Get().(*tokens) + dst.Reset() + defer func() { + tokensPool.Put(dst) + }() + for len(in) > 0 { todo := in if len(inDict) > 0 { @@ -113,9 +125,9 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { } // Compress if len(inDict) == 0 { - statelessEnc(&dst, todo, int16(len(dict))) + statelessEnc(dst, todo, int16(len(dict))) } else { - statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + statelessEnc(dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) } isEof := eof && len(in) == 0 @@ -129,7 +141,7 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { // If we removed less than 1/16th, huffman compress the block. bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) } else { - bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + bw.writeBlockDynamic(dst, isEof, uncompressed, len(in) == 0) } if len(in) > 0 { // Retain a dict if we have more diff --git a/vendor/github.com/mailru/easyjson/.gitignore b/vendor/github.com/mailru/easyjson/.gitignore deleted file mode 100644 index fbfaf7a3f..000000000 --- a/vendor/github.com/mailru/easyjson/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.root -*_easyjson.go -*.iml -.idea -*.swp -bin/* diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE deleted file mode 100644 index fbff658f7..000000000 --- a/vendor/github.com/mailru/easyjson/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
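One note on the stateless.go hunk above: the per-call `var dst tokens` (a large struct) is replaced with a `sync.Pool`-backed instance, avoiding a sizable allocation on every call. The calling pattern is unchanged; a minimal usage sketch of the function whose signature appears in that hunk:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// Compress a payload without retaining any encoder state between calls;
	// eof=true emits the final block, and the dictionary argument is
	// optional (nil is fine).
	payload := bytes.Repeat([]byte("hello stateless deflate "), 100)
	if err := flate.StatelessDeflate(&buf, payload, true, nil); err != nil {
		panic(err)
	}
	fmt.Printf("in=%d out=%d\n", len(payload), buf.Len())
}
```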
diff --git a/vendor/github.com/mailru/easyjson/Makefile b/vendor/github.com/mailru/easyjson/Makefile deleted file mode 100644 index cc5ebbad3..000000000 --- a/vendor/github.com/mailru/easyjson/Makefile +++ /dev/null @@ -1,72 +0,0 @@ -all: test - -clean: - rm -rf bin - rm -rf tests/*_easyjson.go - rm -rf benchmark/*_easyjson.go - -build: - go build -o ./bin/easyjson ./easyjson - -generate: build - bin/easyjson -stubs \ - ./tests/snake.go \ - ./tests/data.go \ - ./tests/omitempty.go \ - ./tests/nothing.go \ - ./tests/named_type.go \ - ./tests/custom_map_key_type.go \ - ./tests/embedded_type.go \ - ./tests/reference_to_pointer.go \ - ./tests/html.go \ - ./tests/unknown_fields.go \ - ./tests/type_declaration.go \ - ./tests/type_declaration_skip.go \ - ./tests/members_escaped.go \ - ./tests/members_unescaped.go \ - ./tests/intern.go \ - ./tests/nocopy.go \ - ./tests/escaping.go - bin/easyjson -all \ - ./tests/data.go \ - ./tests/nothing.go \ - ./tests/errors.go \ - ./tests/html.go \ - ./tests/type_declaration_skip.go - bin/easyjson \ - ./tests/nested_easy.go \ - ./tests/named_type.go \ - ./tests/custom_map_key_type.go \ - ./tests/embedded_type.go \ - ./tests/reference_to_pointer.go \ - ./tests/key_marshaler_map.go \ - ./tests/unknown_fields.go \ - ./tests/type_declaration.go \ - ./tests/members_escaped.go \ - ./tests/intern.go \ - ./tests/nocopy.go \ - ./tests/escaping.go \ - ./tests/nested_marshaler.go - bin/easyjson -snake_case ./tests/snake.go - bin/easyjson -omit_empty ./tests/omitempty.go - bin/easyjson -build_tags=use_easyjson -disable_members_unescape ./benchmark/data.go - bin/easyjson -disallow_unknown_fields ./tests/disallow_unknown.go - bin/easyjson -disable_members_unescape ./tests/members_unescaped.go - -test: generate - go test \ - ./tests \ - ./jlexer \ - ./gen \ - ./buffer - cd benchmark && go test -benchmem -tags use_easyjson -bench . - golint -set_exit_status ./tests/*_easyjson.go - -bench-other: generate - cd benchmark && make - -bench-python: - benchmark/ujson.sh - - -.PHONY: clean generate test build diff --git a/vendor/github.com/mailru/easyjson/README.md b/vendor/github.com/mailru/easyjson/README.md deleted file mode 100644 index 943b9e4ce..000000000 --- a/vendor/github.com/mailru/easyjson/README.md +++ /dev/null @@ -1,408 +0,0 @@ -# easyjson [![Build Status](https://github.com/mailru/easyjson/actions/workflows/easyjson.yml/badge.svg)](https://github.com/mailru/easyjson/actions/workflows/easyjson.yml) [![Go Report Card](https://goreportcard.com/badge/github.com/mailru/easyjson)](https://goreportcard.com/report/github.com/mailru/easyjson) - -Package easyjson provides a fast and easy way to marshal/unmarshal Go structs -to/from JSON without the use of reflection. In performance tests, easyjson -outperforms the standard `encoding/json` package by a factor of 4-5x, and other -JSON encoding packages by a factor of 2-3x. - -easyjson aims to keep generated Go code simple enough so that it can be easily -optimized or fixed. Another goal is to provide users with the ability to -customize the generated code by providing options not available with the -standard `encoding/json` package, such as generating "snake_case" names or -enabling `omitempty` behavior by default. - -## Usage -### Install: -```sh -# for Go < 1.17 -go get -u github.com/mailru/easyjson/... 
-``` -#### or -```sh -# for Go >= 1.17 -go get github.com/mailru/easyjson && go install github.com/mailru/easyjson/...@latest -``` -### Run: -```sh -easyjson -all .go -``` - -The above will generate `_easyjson.go` containing the appropriate marshaler and -unmarshaler funcs for all structs contained in `.go`. - -Please note that easyjson requires a full Go build environment and the `GOPATH` -environment variable to be set. This is because easyjson code generation -invokes `go run` on a temporary file (an approach to code generation borrowed -from [ffjson](https://github.com/pquerna/ffjson)). - -### Serialize -```go -someStruct := &SomeStruct{Field1: "val1", Field2: "val2"} -rawBytes, err := easyjson.Marshal(someStruct) -``` - -### Deserialize -```go -someStruct := &SomeStruct{} -err := easyjson.Unmarshal(rawBytes, someStruct) -``` - -Please see the [GoDoc](https://godoc.org/github.com/mailru/easyjson) -for more information and features. -## Options -```txt -Usage of easyjson: - -all - generate marshaler/unmarshalers for all structs in a file - -build_tags string - build tags to add to generated file - -gen_build_flags string - build flags when running the generator while bootstrapping - -byte - use simple bytes instead of Base64Bytes for slice of bytes - -leave_temps - do not delete temporary files - -no_std_marshalers - don't generate MarshalJSON/UnmarshalJSON funcs - -noformat - do not run 'gofmt -w' on output file - -omit_empty - omit empty fields by default - -output_filename string - specify the filename of the output - -pkg - process the whole package instead of just the given file - -snake_case - use snake_case names instead of CamelCase by default - -lower_camel_case - use lowerCamelCase instead of CamelCase by default - -stubs - only generate stubs for marshaler/unmarshaler funcs - -disallow_unknown_fields - return error if some unknown field in json appeared - -disable_members_unescape - disable unescaping of \uXXXX string sequences in member names -``` - -Using `-all` will generate marshalers/unmarshalers for all Go structs in the -file excluding those structs whose preceding comment starts with `easyjson:skip`. -For example: - -```go -//easyjson:skip -type A struct {} -``` - -If `-all` is not provided, then only those structs whose preceding -comment starts with `easyjson:json` will have marshalers/unmarshalers -generated. For example: - -```go -//easyjson:json -type A struct {} -``` - -Additional option notes: - -* `-snake_case` tells easyjson to generate snake\_case field names by default - (unless overridden by a field tag). The CamelCase to snake\_case conversion - algorithm should work in most cases (ie, HTTPVersion will be converted to - "http_version"). - -* `-build_tags` will add the specified build tags to generated Go sources. - -* `-gen_build_flags` will execute the easyjson bootstapping code to launch the - actual generator command with provided flags. Multiple arguments should be - separated by space e.g. `-gen_build_flags="-mod=mod -x"`. - -## Structure json tag options - -Besides standard json tag options like 'omitempty' the following are supported: - -* 'nocopy' - disables allocation and copying of string values, making them - refer to original json buffer memory. This works great for short lived - objects which are not hold in memory after decoding and immediate usage. - Note if string requires unescaping it will be processed as normally. 
-* 'intern' - string "interning" (deduplication) to save memory when the very - same string dictionary values are often met all over the structure. - See below for more details. - -## Generated Marshaler/Unmarshaler Funcs - -For Go struct types, easyjson generates the funcs `MarshalEasyJSON` / -`UnmarshalEasyJSON` for marshaling/unmarshaling JSON. In turn, these satisfy -the `easyjson.Marshaler` and `easyjson.Unmarshaler` interfaces and when used in -conjunction with `easyjson.Marshal` / `easyjson.Unmarshal` avoid unnecessary -reflection / type assertions during marshaling/unmarshaling to/from JSON for Go -structs. - -easyjson also generates `MarshalJSON` and `UnmarshalJSON` funcs for Go struct -types compatible with the standard `json.Marshaler` and `json.Unmarshaler` -interfaces. Please be aware that using the standard `json.Marshal` / -`json.Unmarshal` for marshaling/unmarshaling will incur a significant -performance penalty when compared to using `easyjson.Marshal` / -`easyjson.Unmarshal`. - -Additionally, easyjson exposes utility funcs that use the `MarshalEasyJSON` and -`UnmarshalEasyJSON` for marshaling/unmarshaling to and from standard readers -and writers. For example, easyjson provides `easyjson.MarshalToHTTPResponseWriter` -which marshals to the standard `http.ResponseWriter`. Please see the [GoDoc -listing](https://godoc.org/github.com/mailru/easyjson) for the full listing of -utility funcs that are available. - -## Controlling easyjson Marshaling and Unmarshaling Behavior - -Go types can provide their own `MarshalEasyJSON` and `UnmarshalEasyJSON` funcs -that satisfy the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces. -These will be used by `easyjson.Marshal` and `easyjson.Unmarshal` when defined -for a Go type. - -Go types can also satisfy the `easyjson.Optional` interface, which allows the -type to define its own `omitempty` logic. - -## Type Wrappers - -easyjson provides additional type wrappers defined in the `easyjson/opt` -package. These wrap the standard Go primitives and in turn satisfy the -easyjson interfaces. - -The `easyjson/opt` type wrappers are useful when needing to distinguish between -a missing value and/or when needing to specifying a default value. Type -wrappers allow easyjson to avoid additional pointers and heap allocations and -can significantly increase performance when used properly. - -## Memory Pooling - -easyjson uses a buffer pool that allocates data in increasing chunks from 128 -to 32768 bytes. Chunks of 512 bytes and larger will be reused with the help of -`sync.Pool`. The maximum size of a chunk is bounded to reduce redundant memory -allocation and to allow larger reusable buffers. - -easyjson's custom allocation buffer pool is defined in the `easyjson/buffer` -package, and the default behavior pool behavior can be modified (if necessary) -through a call to `buffer.Init()` prior to any marshaling or unmarshaling. -Please see the [GoDoc listing](https://godoc.org/github.com/mailru/easyjson/buffer) -for more information. - -## String interning - -During unmarshaling, `string` field values can be optionally -[interned](https://en.wikipedia.org/wiki/String_interning) to reduce memory -allocations and usage by deduplicating strings in memory, at the expense of slightly -increased CPU usage. - -This will work effectively only for `string` fields being decoded that have frequently -the same value (e.g. if you have a string field that can only assume a small number -of possible values). 
- -To enable string interning, add the `intern` keyword tag to your `json` tag on `string` -fields, e.g.: - -```go -type Foo struct { - UUID string `json:"uuid"` // will not be interned during unmarshaling - State string `json:"state,intern"` // will be interned during unmarshaling -} -``` - -## Issues, Notes, and Limitations - -* easyjson is still early in its development. As such, there are likely to be - bugs and missing features when compared to `encoding/json`. In the case of a - missing feature or bug, please create a GitHub issue. Pull requests are - welcome! - -* Unlike `encoding/json`, object keys are case-sensitive. Case-insensitive - matching is not currently provided due to the significant performance hit - when doing case-insensitive key matching. In the future, case-insensitive - object key matching may be provided via an option to the generator. - -* easyjson makes use of `unsafe`, which simplifies the code and - provides significant performance benefits by allowing no-copy - conversion from `[]byte` to `string`. That said, `unsafe` is used - only when unmarshaling and parsing JSON, and any `unsafe` operations - / memory allocations done will be safely deallocated by - easyjson. Set the build tag `easyjson_nounsafe` to compile it - without `unsafe`. - -* easyjson is compatible with Google App Engine. The `appengine` build - tag (set by App Engine's environment) will automatically disable the - use of `unsafe`, which is not allowed in App Engine's Standard - Environment. Note that the use with App Engine is still experimental. - -* Floats are formatted using the default precision from Go's `strconv` package. - As such, easyjson will not correctly handle high precision floats when - marshaling/unmarshaling JSON. Note, however, that there are very few/limited - uses where this behavior is not sufficient for general use. That said, a - different package may be needed if precise marshaling/unmarshaling of high - precision floats to/from JSON is required. - -* While unmarshaling, the JSON parser does the minimal amount of work needed to - skip over unmatching parens, and as such full validation is not done for the - entire JSON value being unmarshaled/parsed. - -* Currently there is no true streaming support for encoding/decoding as - typically for many uses/protocols the final, marshaled length of the JSON - needs to be known prior to sending the data. Currently this is not possible - with easyjson's architecture. - -* easyjson parser and codegen based on reflection, so it won't work on `package main` - files, because they cant be imported by parser. - -## Benchmarks - -Most benchmarks were done using the example -[13kB example JSON](https://dev.twitter.com/rest/reference/get/search/tweets) -(9k after eliminating whitespace). This example is similar to real-world data, -is well-structured, and contains a healthy variety of different types, making -it ideal for JSON serialization benchmarks. - -Note: - -* For small request benchmarks, an 80 byte portion of the above example was - used. - -* For large request marshaling benchmarks, a struct containing 50 regular - samples was used, making a ~500kB output JSON. - -* Benchmarks are showing the results of easyjson's default behaviour, - which makes use of `unsafe`. - -Benchmarks are available in the repository and can be run by invoking `make`. - -### easyjson vs. encoding/json - -easyjson is roughly 5-6 times faster than the standard `encoding/json` for -unmarshaling, and 3-4 times faster for non-concurrent marshaling. 
Concurrent -marshaling is 6-7x faster if marshaling to a writer. - -### easyjson vs. ffjson - -easyjson uses the same approach for JSON marshaling as -[ffjson](https://github.com/pquerna/ffjson), but takes a significantly -different approach to lexing and parsing JSON during unmarshaling. This means -easyjson is roughly 2-3x faster for unmarshaling and 1.5-2x faster for -non-concurrent unmarshaling. - -As of this writing, `ffjson` seems to have issues when used concurrently: -specifically, large request pooling hurts `ffjson`'s performance and causes -scalability issues. These issues with `ffjson` can likely be fixed, but as of -writing remain outstanding/known issues with `ffjson`. - -easyjson and `ffjson` have similar performance for small requests, however -easyjson outperforms `ffjson` by roughly 2-5x times for large requests when -used with a writer. - -### easyjson vs. go/codec - -[go/codec](https://github.com/ugorji/go) provides -compile-time helpers for JSON generation. In this case, helpers do not work -like marshalers as they are encoding-independent. - -easyjson is generally 2x faster than `go/codec` for non-concurrent benchmarks -and about 3x faster for concurrent encoding (without marshaling to a writer). - -In an attempt to measure marshaling performance of `go/codec` (as opposed to -allocations/memcpy/writer interface invocations), a benchmark was done with -resetting length of a byte slice rather than resetting the whole slice to nil. -However, the optimization in this exact form may not be applicable in practice, -since the memory is not freed between marshaling operations. - -### easyjson vs 'ujson' python module - -[ujson](https://github.com/esnme/ultrajson) is using C code for parsing, so it -is interesting to see how plain golang compares to that. It is important to note -that the resulting object for python is slower to access, since the library -parses JSON object into dictionaries. - -easyjson is slightly faster for unmarshaling and 2-3x faster than `ujson` for -marshaling. - -### Benchmark Results - -`ffjson` results are from February 4th, 2016, using the latest `ffjson` and go1.6. -`go/codec` results are from March 4th, 2016, using the latest `go/codec` and go1.6. - -#### Unmarshaling - -| lib | json size | MB/s | allocs/op | B/op | -|:---------|:----------|-----:|----------:|------:| -| standard | regular | 22 | 218 | 10229 | -| standard | small | 9.7 | 14 | 720 | -| | | | | | -| easyjson | regular | 125 | 128 | 9794 | -| easyjson | small | 67 | 3 | 128 | -| | | | | | -| ffjson | regular | 66 | 141 | 9985 | -| ffjson | small | 17.6 | 10 | 488 | -| | | | | | -| codec | regular | 55 | 434 | 19299 | -| codec | small | 29 | 7 | 336 | -| | | | | | -| ujson | regular | 103 | N/A | N/A | - -#### Marshaling, one goroutine. 
- -| lib | json size | MB/s | allocs/op | B/op | -|:----------|:----------|-----:|----------:|------:| -| standard | regular | 75 | 9 | 23256 | -| standard | small | 32 | 3 | 328 | -| standard | large | 80 | 17 | 1.2M | -| | | | | | -| easyjson | regular | 213 | 9 | 10260 | -| easyjson* | regular | 263 | 8 | 742 | -| easyjson | small | 125 | 1 | 128 | -| easyjson | large | 212 | 33 | 490k | -| easyjson* | large | 262 | 25 | 2879 | -| | | | | | -| ffjson | regular | 122 | 153 | 21340 | -| ffjson** | regular | 146 | 152 | 4897 | -| ffjson | small | 36 | 5 | 384 | -| ffjson** | small | 64 | 4 | 128 | -| ffjson | large | 134 | 7317 | 818k | -| ffjson** | large | 125 | 7320 | 827k | -| | | | | | -| codec | regular | 80 | 17 | 33601 | -| codec*** | regular | 108 | 9 | 1153 | -| codec | small | 42 | 3 | 304 | -| codec*** | small | 56 | 1 | 48 | -| codec | large | 73 | 483 | 2.5M | -| codec*** | large | 103 | 451 | 66007 | -| | | | | | -| ujson | regular | 92 | N/A | N/A | - -\* marshaling to a writer, -\*\* using `ffjson.Pool()`, -\*\*\* reusing output slice instead of resetting it to nil - -#### Marshaling, concurrent. - -| lib | json size | MB/s | allocs/op | B/op | -|:----------|:----------|-----:|----------:|------:| -| standard | regular | 252 | 9 | 23257 | -| standard | small | 124 | 3 | 328 | -| standard | large | 289 | 17 | 1.2M | -| | | | | | -| easyjson | regular | 792 | 9 | 10597 | -| easyjson* | regular | 1748 | 8 | 779 | -| easyjson | small | 333 | 1 | 128 | -| easyjson | large | 718 | 36 | 548k | -| easyjson* | large | 2134 | 25 | 4957 | -| | | | | | -| ffjson | regular | 301 | 153 | 21629 | -| ffjson** | regular | 707 | 152 | 5148 | -| ffjson | small | 62 | 5 | 384 | -| ffjson** | small | 282 | 4 | 128 | -| ffjson | large | 438 | 7330 | 1.0M | -| ffjson** | large | 131 | 7319 | 820k | -| | | | | | -| codec | regular | 183 | 17 | 33603 | -| codec*** | regular | 671 | 9 | 1157 | -| codec | small | 147 | 3 | 304 | -| codec*** | small | 299 | 1 | 48 | -| codec | large | 190 | 483 | 2.5M | -| codec*** | large | 752 | 451 | 77574 | - -\* marshaling to a writer, -\*\* using `ffjson.Pool()`, -\*\*\* reusing output slice instead of resetting it to nil diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go deleted file mode 100644 index 598a54af9..000000000 --- a/vendor/github.com/mailru/easyjson/buffer/pool.go +++ /dev/null @@ -1,278 +0,0 @@ -// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to -// reduce copying and to allow reuse of individual chunks. -package buffer - -import ( - "io" - "net" - "sync" -) - -// PoolConfig contains configuration for the allocation and reuse strategy. -type PoolConfig struct { - StartSize int // Minimum chunk size that is allocated. - PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. - MaxSize int // Maximum chunk size that will be allocated. -} - -var config = PoolConfig{ - StartSize: 128, - PooledSize: 512, - MaxSize: 32768, -} - -// Reuse pool: chunk size -> pool. -var buffers = map[int]*sync.Pool{} - -func initBuffers() { - for l := config.PooledSize; l <= config.MaxSize; l *= 2 { - buffers[l] = new(sync.Pool) - } -} - -func init() { - initBuffers() -} - -// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. -func Init(cfg PoolConfig) { - config = cfg - initBuffers() -} - -// putBuf puts a chunk to reuse pool if it can be reused. 
-func putBuf(buf []byte) { - size := cap(buf) - if size < config.PooledSize { - return - } - if c := buffers[size]; c != nil { - c.Put(buf[:0]) - } -} - -// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. -func getBuf(size int) []byte { - if size >= config.PooledSize { - if c := buffers[size]; c != nil { - v := c.Get() - if v != nil { - return v.([]byte) - } - } - } - return make([]byte, 0, size) -} - -// Buffer is a buffer optimized for serialization without extra copying. -type Buffer struct { - - // Buf is the current chunk that can be used for serialization. - Buf []byte - - toPool []byte - bufs [][]byte -} - -// EnsureSpace makes sure that the current chunk contains at least s free bytes, -// possibly creating a new chunk. -func (b *Buffer) EnsureSpace(s int) { - if cap(b.Buf)-len(b.Buf) < s { - b.ensureSpaceSlow(s) - } -} - -func (b *Buffer) ensureSpaceSlow(s int) { - l := len(b.Buf) - if l > 0 { - if cap(b.toPool) != cap(b.Buf) { - // Chunk was reallocated, toPool can be pooled. - putBuf(b.toPool) - } - if cap(b.bufs) == 0 { - b.bufs = make([][]byte, 0, 8) - } - b.bufs = append(b.bufs, b.Buf) - l = cap(b.toPool) * 2 - } else { - l = config.StartSize - } - - if l > config.MaxSize { - l = config.MaxSize - } - b.Buf = getBuf(l) - b.toPool = b.Buf -} - -// AppendByte appends a single byte to buffer. -func (b *Buffer) AppendByte(data byte) { - b.EnsureSpace(1) - b.Buf = append(b.Buf, data) -} - -// AppendBytes appends a byte slice to buffer. -func (b *Buffer) AppendBytes(data []byte) { - if len(data) <= cap(b.Buf)-len(b.Buf) { - b.Buf = append(b.Buf, data...) // fast path - } else { - b.appendBytesSlow(data) - } -} - -func (b *Buffer) appendBytesSlow(data []byte) { - for len(data) > 0 { - b.EnsureSpace(1) - - sz := cap(b.Buf) - len(b.Buf) - if sz > len(data) { - sz = len(data) - } - - b.Buf = append(b.Buf, data[:sz]...) - data = data[sz:] - } -} - -// AppendString appends a string to buffer. -func (b *Buffer) AppendString(data string) { - if len(data) <= cap(b.Buf)-len(b.Buf) { - b.Buf = append(b.Buf, data...) // fast path - } else { - b.appendStringSlow(data) - } -} - -func (b *Buffer) appendStringSlow(data string) { - for len(data) > 0 { - b.EnsureSpace(1) - - sz := cap(b.Buf) - len(b.Buf) - if sz > len(data) { - sz = len(data) - } - - b.Buf = append(b.Buf, data[:sz]...) - data = data[sz:] - } -} - -// Size computes the size of a buffer by adding sizes of every chunk. -func (b *Buffer) Size() int { - size := len(b.Buf) - for _, buf := range b.bufs { - size += len(buf) - } - return size -} - -// DumpTo outputs the contents of a buffer to a writer and resets the buffer. -func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { - bufs := net.Buffers(b.bufs) - if len(b.Buf) > 0 { - bufs = append(bufs, b.Buf) - } - n, err := bufs.WriteTo(w) - - for _, buf := range b.bufs { - putBuf(buf) - } - putBuf(b.toPool) - - b.bufs = nil - b.Buf = nil - b.toPool = nil - - return int(n), err -} - -// BuildBytes creates a single byte slice with all the contents of the buffer. Data is -// copied if it does not fit in a single chunk. You can optionally provide one byte -// slice as argument that it will try to reuse. -func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { - if len(b.bufs) == 0 { - ret := b.Buf - b.toPool = nil - b.Buf = nil - return ret - } - - var ret []byte - size := b.Size() - - // If we got a buffer as argument and it is big enough, reuse it. 
- if len(reuse) == 1 && cap(reuse[0]) >= size { - ret = reuse[0][:0] - } else { - ret = make([]byte, 0, size) - } - for _, buf := range b.bufs { - ret = append(ret, buf...) - putBuf(buf) - } - - ret = append(ret, b.Buf...) - putBuf(b.toPool) - - b.bufs = nil - b.toPool = nil - b.Buf = nil - - return ret -} - -type readCloser struct { - offset int - bufs [][]byte -} - -func (r *readCloser) Read(p []byte) (n int, err error) { - for _, buf := range r.bufs { - // Copy as much as we can. - x := copy(p[n:], buf[r.offset:]) - n += x // Increment how much we filled. - - // Did we empty the whole buffer? - if r.offset+x == len(buf) { - // On to the next buffer. - r.offset = 0 - r.bufs = r.bufs[1:] - - // We can release this buffer. - putBuf(buf) - } else { - r.offset += x - } - - if n == len(p) { - break - } - } - // No buffers left or nothing read? - if len(r.bufs) == 0 { - err = io.EOF - } - return -} - -func (r *readCloser) Close() error { - // Release all remaining buffers. - for _, buf := range r.bufs { - putBuf(buf) - } - // In case Close gets called multiple times. - r.bufs = nil - - return nil -} - -// ReadCloser creates an io.ReadCloser with all the contents of the buffer. -func (b *Buffer) ReadCloser() io.ReadCloser { - ret := &readCloser{0, append(b.bufs, b.Buf)} - - b.bufs = nil - b.toPool = nil - b.Buf = nil - - return ret -} diff --git a/vendor/github.com/mailru/easyjson/helpers.go b/vendor/github.com/mailru/easyjson/helpers.go deleted file mode 100644 index efe34bf2a..000000000 --- a/vendor/github.com/mailru/easyjson/helpers.go +++ /dev/null @@ -1,114 +0,0 @@ -// Package easyjson contains marshaler/unmarshaler interfaces and helper functions. -package easyjson - -import ( - "io" - "io/ioutil" - "net/http" - "strconv" - "unsafe" - - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// Marshaler is an easyjson-compatible marshaler interface. -type Marshaler interface { - MarshalEasyJSON(w *jwriter.Writer) -} - -// Unmarshaler is an easyjson-compatible unmarshaler interface. -type Unmarshaler interface { - UnmarshalEasyJSON(w *jlexer.Lexer) -} - -// MarshalerUnmarshaler is an easyjson-compatible marshaler/unmarshaler interface. -type MarshalerUnmarshaler interface { - Marshaler - Unmarshaler -} - -// Optional defines an undefined-test method for a type to integrate with 'omitempty' logic. -type Optional interface { - IsDefined() bool -} - -// UnknownsUnmarshaler provides a method to unmarshal unknown struct fileds and save them as you want -type UnknownsUnmarshaler interface { - UnmarshalUnknown(in *jlexer.Lexer, key string) -} - -// UnknownsMarshaler provides a method to write additional struct fields -type UnknownsMarshaler interface { - MarshalUnknowns(w *jwriter.Writer, first bool) -} - -func isNilInterface(i interface{}) bool { - return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0 -} - -// Marshal returns data as a single byte slice. Method is suboptimal as the data is likely to be copied -// from a chain of smaller chunks. -func Marshal(v Marshaler) ([]byte, error) { - if isNilInterface(v) { - return nullBytes, nil - } - - w := jwriter.Writer{} - v.MarshalEasyJSON(&w) - return w.BuildBytes() -} - -// MarshalToWriter marshals the data to an io.Writer. 
-func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) { - if isNilInterface(v) { - return w.Write(nullBytes) - } - - jw := jwriter.Writer{} - v.MarshalEasyJSON(&jw) - return jw.DumpTo(w) -} - -// MarshalToHTTPResponseWriter sets Content-Length and Content-Type headers for the -// http.ResponseWriter, and send the data to the writer. started will be equal to -// false if an error occurred before any http.ResponseWriter methods were actually -// invoked (in this case a 500 reply is possible). -func MarshalToHTTPResponseWriter(v Marshaler, w http.ResponseWriter) (started bool, written int, err error) { - if isNilInterface(v) { - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Content-Length", strconv.Itoa(len(nullBytes))) - written, err = w.Write(nullBytes) - return true, written, err - } - - jw := jwriter.Writer{} - v.MarshalEasyJSON(&jw) - if jw.Error != nil { - return false, 0, jw.Error - } - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Content-Length", strconv.Itoa(jw.Size())) - - started = true - written, err = jw.DumpTo(w) - return -} - -// Unmarshal decodes the JSON in data into the object. -func Unmarshal(data []byte, v Unmarshaler) error { - l := jlexer.Lexer{Data: data} - v.UnmarshalEasyJSON(&l) - return l.Error() -} - -// UnmarshalFromReader reads all the data in the reader and decodes as JSON into the object. -func UnmarshalFromReader(r io.Reader, v Unmarshaler) error { - data, err := ioutil.ReadAll(r) - if err != nil { - return err - } - l := jlexer.Lexer{Data: data} - v.UnmarshalEasyJSON(&l) - return l.Error() -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go deleted file mode 100644 index e68108f86..000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go +++ /dev/null @@ -1,21 +0,0 @@ -// This file will only be included to the build if neither -// easyjson_nounsafe nor appengine build tag is set. See README notes -// for more details. - -//+build !easyjson_nounsafe -//+build !appengine - -package jlexer - -import ( - "unsafe" -) - -// bytesToStr creates a string pointing at the slice to avoid copying. -// -// Warning: the string returned by the function should be used with care, as the whole input data -// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data -// may be garbage-collected even when the string exists. -func bytesToStr(data []byte) string { - return *(*string)(unsafe.Pointer(&data)) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go deleted file mode 100644 index 864d1be67..000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go +++ /dev/null @@ -1,13 +0,0 @@ -// This file is included to the build if any of the buildtags below -// are defined. Refer to README notes for more details. - -//+build easyjson_nounsafe appengine - -package jlexer - -// bytesToStr creates a string normally from []byte -// -// Note that this method is roughly 1.5x slower than using the 'unsafe' method. 
-func bytesToStr(data []byte) string { - return string(data) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go deleted file mode 100644 index e90ec40d0..000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/error.go +++ /dev/null @@ -1,15 +0,0 @@ -package jlexer - -import "fmt" - -// LexerError implements the error interface and represents all possible errors that can be -// generated during parsing the JSON data. -type LexerError struct { - Reason string - Offset int - Data string -} - -func (l *LexerError) Error() string { - return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go deleted file mode 100644 index a27705b12..000000000 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ /dev/null @@ -1,1257 +0,0 @@ -// Package jlexer contains a JSON lexer implementation. -// -// It is expected that it is mostly used with generated parser code, so the interface is tuned -// for a parser that knows what kind of data is expected. -package jlexer - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" - - "github.com/josharian/intern" -) - -// TokenKind determines type of a token. -type TokenKind byte - -const ( - TokenUndef TokenKind = iota // No token. - TokenDelim // Delimiter: one of '{', '}', '[' or ']'. - TokenString // A string literal, e.g. "abc\u1234" - TokenNumber // Number literal, e.g. 1.5e5 - TokenBool // Boolean literal: true or false. - TokenNull // null keyword. -) - -// token describes a single token: type, position in the input and value. -type token struct { - kind TokenKind // Type of a token. - - boolValue bool // Value if a boolean literal token. - byteValueCloned bool // true if byteValue was allocated and does not refer to original json body - byteValue []byte // Raw value of a token. - delimValue byte -} - -// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. -type Lexer struct { - Data []byte // Input data given to the lexer. - - start int // Start of the current token. - pos int // Current unscanned position in the input stream. - token token // Last scanned token, if token.kind != TokenUndef. - - firstElement bool // Whether current element is the first in array or an object. - wantSep byte // A comma or a colon character, which need to occur before a token. - - UseMultipleErrors bool // If we want to use multiple errors. - fatalError error // Fatal error occurred during lexing. It is usually a syntax error. - multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. -} - -// FetchToken scans the input for the next token. -func (r *Lexer) FetchToken() { - r.token.kind = TokenUndef - r.start = r.pos - - // Check if r.Data has r.pos element - // If it doesn't, it mean corrupted input data - if len(r.Data) < r.pos { - r.errParse("Unexpected end of data") - return - } - // Determine the type of a token by skipping whitespace and reading the - // first character. 
- for _, c := range r.Data[r.pos:] { - switch c { - case ':', ',': - if r.wantSep == c { - r.pos++ - r.start++ - r.wantSep = 0 - } else { - r.errSyntax() - } - - case ' ', '\t', '\r', '\n': - r.pos++ - r.start++ - - case '"': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = TokenString - r.fetchString() - return - - case '{', '[': - if r.wantSep != 0 { - r.errSyntax() - } - r.firstElement = true - r.token.kind = TokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '}', ']': - if !r.firstElement && (r.wantSep != ',') { - r.errSyntax() - } - r.wantSep = 0 - r.token.kind = TokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': - if r.wantSep != 0 { - r.errSyntax() - } - r.token.kind = TokenNumber - r.fetchNumber() - return - - case 'n': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = TokenNull - r.fetchNull() - return - - case 't': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = TokenBool - r.token.boolValue = true - r.fetchTrue() - return - - case 'f': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = TokenBool - r.token.boolValue = false - r.fetchFalse() - return - - default: - r.errSyntax() - return - } - } - r.fatalError = io.EOF - return -} - -// isTokenEnd returns true if the char can follow a non-delimiter token -func isTokenEnd(c byte) bool { - return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' -} - -// fetchNull fetches and checks remaining bytes of null keyword. -func (r *Lexer) fetchNull() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'u' || - r.Data[r.pos-2] != 'l' || - r.Data[r.pos-1] != 'l' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchTrue fetches and checks remaining bytes of true keyword. -func (r *Lexer) fetchTrue() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'r' || - r.Data[r.pos-2] != 'u' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchFalse fetches and checks remaining bytes of false keyword. -func (r *Lexer) fetchFalse() { - r.pos += 5 - if r.pos > len(r.Data) || - r.Data[r.pos-4] != 'a' || - r.Data[r.pos-3] != 'l' || - r.Data[r.pos-2] != 's' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 5 - r.errSyntax() - } -} - -// fetchNumber scans a number literal token. -func (r *Lexer) fetchNumber() { - hasE := false - afterE := false - hasDot := false - - r.pos++ - for i, c := range r.Data[r.pos:] { - switch { - case c >= '0' && c <= '9': - afterE = false - case c == '.' && !hasDot: - hasDot = true - case (c == 'e' || c == 'E') && !hasE: - hasE = true - hasDot = true - afterE = true - case (c == '+' || c == '-') && afterE: - afterE = false - default: - r.pos += i - if !isTokenEnd(c) { - r.errSyntax() - } else { - r.token.byteValue = r.Data[r.start:r.pos] - } - return - } - } - - r.pos = len(r.Data) - r.token.byteValue = r.Data[r.start:] -} - -// findStringLen tries to scan into the string literal for ending quote char to determine required size. -// The size will be exact if no escapes are present and may be inexact if there are escaped chars. 
-func findStringLen(data []byte) (isValid bool, length int) { - for { - idx := bytes.IndexByte(data, '"') - if idx == -1 { - return false, len(data) - } - if idx == 0 || (idx > 0 && data[idx-1] != '\\') { - return true, length + idx - } - - // count \\\\\\\ sequences. even number of slashes means quote is not really escaped - cnt := 1 - for idx-cnt-1 >= 0 && data[idx-cnt-1] == '\\' { - cnt++ - } - if cnt%2 == 0 { - return true, length + idx - } - - length += idx + 1 - data = data[idx+1:] - } -} - -// unescapeStringToken performs unescaping of string token. -// if no escaping is needed, original string is returned, otherwise - a new one allocated -func (r *Lexer) unescapeStringToken() (err error) { - data := r.token.byteValue - var unescapedData []byte - - for { - i := bytes.IndexByte(data, '\\') - if i == -1 { - break - } - - escapedRune, escapedBytes, err := decodeEscape(data[i:]) - if err != nil { - r.errParse(err.Error()) - return err - } - - if unescapedData == nil { - unescapedData = make([]byte, 0, len(r.token.byteValue)) - } - - var d [4]byte - s := utf8.EncodeRune(d[:], escapedRune) - unescapedData = append(unescapedData, data[:i]...) - unescapedData = append(unescapedData, d[:s]...) - - data = data[i+escapedBytes:] - } - - if unescapedData != nil { - r.token.byteValue = append(unescapedData, data...) - r.token.byteValueCloned = true - } - return -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - var val rune - for i := 2; i < len(s) && i < 6; i++ { - var v byte - c := s[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - v = c - '0' - case 'a', 'b', 'c', 'd', 'e', 'f': - v = c - 'a' + 10 - case 'A', 'B', 'C', 'D', 'E', 'F': - v = c - 'A' + 10 - default: - return -1 - } - - val <<= 4 - val |= rune(v) - } - return val -} - -// decodeEscape processes a single escape sequence and returns number of bytes processed. -func decodeEscape(data []byte) (decoded rune, bytesProcessed int, err error) { - if len(data) < 2 { - return 0, 0, errors.New("incorrect escape symbol \\ at the end of token") - } - - c := data[1] - switch c { - case '"', '/', '\\': - return rune(c), 2, nil - case 'b': - return '\b', 2, nil - case 'f': - return '\f', 2, nil - case 'n': - return '\n', 2, nil - case 'r': - return '\r', 2, nil - case 't': - return '\t', 2, nil - case 'u': - rr := getu4(data) - if rr < 0 { - return 0, 0, errors.New("incorrectly escaped \\uXXXX sequence") - } - - read := 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(data[read:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - read += 6 - rr = dec - } else { - rr = unicode.ReplacementChar - } - } - return rr, read, nil - } - - return 0, 0, errors.New("incorrectly escaped bytes") -} - -// fetchString scans a string literal token. -func (r *Lexer) fetchString() { - r.pos++ - data := r.Data[r.pos:] - - isValid, length := findStringLen(data) - if !isValid { - r.pos += length - r.errParse("unterminated string literal") - return - } - r.token.byteValue = data[:length] - r.pos += length + 1 // skip closing '"' as well -} - -// scanToken scans the next token if no token is currently available in the lexer. -func (r *Lexer) scanToken() { - if r.token.kind != TokenUndef || r.fatalError != nil { - return - } - - r.FetchToken() -} - -// consume resets the current token to allow scanning the next one. 
-func (r *Lexer) consume() { - r.token.kind = TokenUndef - r.token.byteValueCloned = false - r.token.delimValue = 0 -} - -// Ok returns true if no error (including io.EOF) was encountered during scanning. -func (r *Lexer) Ok() bool { - return r.fatalError == nil -} - -const maxErrorContextLen = 13 - -func (r *Lexer) errParse(what string) { - if r.fatalError == nil { - var str string - if len(r.Data)-r.pos <= maxErrorContextLen { - str = string(r.Data) - } else { - str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." - } - r.fatalError = &LexerError{ - Reason: what, - Offset: r.pos, - Data: str, - } - } -} - -func (r *Lexer) errSyntax() { - r.errParse("syntax error") -} - -func (r *Lexer) errInvalidToken(expected string) { - if r.fatalError != nil { - return - } - if r.UseMultipleErrors { - r.pos = r.start - r.consume() - r.SkipRecursive() - switch expected { - case "[": - r.token.delimValue = ']' - r.token.kind = TokenDelim - case "{": - r.token.delimValue = '}' - r.token.kind = TokenDelim - } - r.addNonfatalError(&LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.start, - Data: string(r.Data[r.start:r.pos]), - }) - return - } - - var str string - if len(r.token.byteValue) <= maxErrorContextLen { - str = string(r.token.byteValue) - } else { - str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." - } - r.fatalError = &LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.pos, - Data: str, - } -} - -func (r *Lexer) GetPos() int { - return r.pos -} - -// Delim consumes a token and verifies that it is the given delimiter. -func (r *Lexer) Delim(c byte) { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() || r.token.delimValue != c { - r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. - r.errInvalidToken(string([]byte{c})) - } else { - r.consume() - } -} - -// IsDelim returns true if there was no scanning error and next token is the given delimiter. -func (r *Lexer) IsDelim(c byte) bool { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - return !r.Ok() || r.token.delimValue == c -} - -// Null verifies that the next token is null and consumes it. -func (r *Lexer) Null() { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != TokenNull { - r.errInvalidToken("null") - } - r.consume() -} - -// IsNull returns true if the next token is a null keyword. -func (r *Lexer) IsNull() bool { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - return r.Ok() && r.token.kind == TokenNull -} - -// Skip skips a single token. -func (r *Lexer) Skip() { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - r.consume() -} - -// SkipRecursive skips next array or object completely, or just skips a single token if not -// an array/object. -// -// Note: no syntax validation is performed on the skipped data. 
-func (r *Lexer) SkipRecursive() { - r.scanToken() - var start, end byte - startPos := r.start - - switch r.token.delimValue { - case '{': - start, end = '{', '}' - case '[': - start, end = '[', ']' - default: - r.consume() - return - } - - r.consume() - - level := 1 - inQuotes := false - wasEscape := false - - for i, c := range r.Data[r.pos:] { - switch { - case c == start && !inQuotes: - level++ - case c == end && !inQuotes: - level-- - if level == 0 { - r.pos += i + 1 - if !json.Valid(r.Data[startPos:r.pos]) { - r.pos = len(r.Data) - r.fatalError = &LexerError{ - Reason: "skipped array/object json value is invalid", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - } - } - return - } - case c == '\\' && inQuotes: - wasEscape = !wasEscape - continue - case c == '"' && inQuotes: - inQuotes = wasEscape - case c == '"': - inQuotes = true - } - wasEscape = false - } - r.pos = len(r.Data) - r.fatalError = &LexerError{ - Reason: "EOF reached while skipping array/object or token", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - } -} - -// Raw fetches the next item recursively as a data slice -func (r *Lexer) Raw() []byte { - r.SkipRecursive() - if !r.Ok() { - return nil - } - return r.Data[r.start:r.pos] -} - -// IsStart returns whether the lexer is positioned at the start -// of an input string. -func (r *Lexer) IsStart() bool { - return r.pos == 0 -} - -// Consumed reads all remaining bytes from the input, publishing an error if -// there is anything but whitespace remaining. -func (r *Lexer) Consumed() { - if r.pos > len(r.Data) || !r.Ok() { - return - } - - for _, c := range r.Data[r.pos:] { - if c != ' ' && c != '\t' && c != '\r' && c != '\n' { - r.AddError(&LexerError{ - Reason: "invalid character '" + string(c) + "' after top-level value", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - }) - return - } - - r.pos++ - r.start++ - } -} - -func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != TokenString { - r.errInvalidToken("string") - return "", nil - } - if !skipUnescape { - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "", nil - } - } - - bytes := r.token.byteValue - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret, bytes -} - -// UnsafeString returns the string value if the token is a string literal. -// -// Warning: returned string may point to the input buffer, so the string should not outlive -// the input buffer. Intended pattern of usage is as an argument to a switch statement. -func (r *Lexer) UnsafeString() string { - ret, _ := r.unsafeString(false) - return ret -} - -// UnsafeBytes returns the byte slice if the token is a string literal. -func (r *Lexer) UnsafeBytes() []byte { - _, ret := r.unsafeString(false) - return ret -} - -// UnsafeFieldName returns current member name string token -func (r *Lexer) UnsafeFieldName(skipUnescape bool) string { - ret, _ := r.unsafeString(skipUnescape) - return ret -} - -// String reads a string literal. 
-func (r *Lexer) String() string { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != TokenString { - r.errInvalidToken("string") - return "" - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "" - } - var ret string - if r.token.byteValueCloned { - ret = bytesToStr(r.token.byteValue) - } else { - ret = string(r.token.byteValue) - } - r.consume() - return ret -} - -// StringIntern reads a string literal, and performs string interning on it. -func (r *Lexer) StringIntern() string { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != TokenString { - r.errInvalidToken("string") - return "" - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return "" - } - ret := intern.Bytes(r.token.byteValue) - r.consume() - return ret -} - -// Bytes reads a string literal and base64 decodes it into a byte slice. -func (r *Lexer) Bytes() []byte { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != TokenString { - r.errInvalidToken("string") - return nil - } - if err := r.unescapeStringToken(); err != nil { - r.errInvalidToken("string") - return nil - } - ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) - n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) - if err != nil { - r.fatalError = &LexerError{ - Reason: err.Error(), - } - return nil - } - - r.consume() - return ret[:n] -} - -// Bool reads a true or false boolean keyword. -func (r *Lexer) Bool() bool { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != TokenBool { - r.errInvalidToken("bool") - return false - } - ret := r.token.boolValue - r.consume() - return ret -} - -func (r *Lexer) number() string { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != TokenNumber { - r.errInvalidToken("number") - return "" - } - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret -} - -func (r *Lexer) Uint8() uint8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint8(n) -} - -func (r *Lexer) Uint16() uint16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint16(n) -} - -func (r *Lexer) Uint32() uint32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint32(n) -} - -func (r *Lexer) Uint64() uint64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Uint() uint { - return uint(r.Uint64()) -} - -func (r *Lexer) Int8() int8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int8(n) -} - -func (r *Lexer) Int16() int16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := 
strconv.ParseInt(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int16(n) -} - -func (r *Lexer) Int32() int32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int32(n) -} - -func (r *Lexer) Int64() int64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Int() int { - return int(r.Int64()) -} - -func (r *Lexer) Uint8Str() uint8 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint8(n) -} - -func (r *Lexer) Uint16Str() uint16 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint16(n) -} - -func (r *Lexer) Uint32Str() uint32 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint32(n) -} - -func (r *Lexer) Uint64Str() uint64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) UintStr() uint { - return uint(r.Uint64Str()) -} - -func (r *Lexer) UintptrStr() uintptr { - return uintptr(r.Uint64Str()) -} - -func (r *Lexer) Int8Str() int8 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int8(n) -} - -func (r *Lexer) Int16Str() int16 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int16(n) -} - -func (r *Lexer) Int32Str() int32 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int32(n) -} - -func (r *Lexer) Int64Str() int64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) IntStr() int { - return int(r.Int64Str()) -} - -func (r *Lexer) Float32() float32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return float32(n) -} - -func (r *Lexer) Float32Str() float32 { - s, b := r.unsafeString(false) 
- if !r.Ok() { - return 0 - } - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return float32(n) -} - -func (r *Lexer) Float64() float64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Float64Str() float64 { - s, b := r.unsafeString(false) - if !r.Ok() { - return 0 - } - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) Error() error { - return r.fatalError -} - -func (r *Lexer) AddError(e error) { - if r.fatalError == nil { - r.fatalError = e - } -} - -func (r *Lexer) AddNonFatalError(e error) { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Data: string(r.Data[r.start:r.pos]), - Reason: e.Error(), - }) -} - -func (r *Lexer) addNonfatalError(err *LexerError) { - if r.UseMultipleErrors { - // We don't want to add errors with the same offset. - if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { - return - } - r.multipleErrors = append(r.multipleErrors, err) - return - } - r.fatalError = err -} - -func (r *Lexer) GetNonFatalErrors() []*LexerError { - return r.multipleErrors -} - -// JsonNumber fetches and json.Number from 'encoding/json' package. -// Both int, float or string, contains them are valid values -func (r *Lexer) JsonNumber() json.Number { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() { - r.errInvalidToken("json.Number") - return json.Number("") - } - - switch r.token.kind { - case TokenString: - return json.Number(r.String()) - case TokenNumber: - return json.Number(r.Raw()) - case TokenNull: - r.Null() - return json.Number("") - default: - r.errSyntax() - return json.Number("") - } -} - -// Interface fetches an interface{} analogous to the 'encoding/json' package. -func (r *Lexer) Interface() interface{} { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() { - return nil - } - switch r.token.kind { - case TokenString: - return r.String() - case TokenNumber: - return r.Float64() - case TokenBool: - return r.Bool() - case TokenNull: - r.Null() - return nil - } - - if r.token.delimValue == '{' { - r.consume() - - ret := map[string]interface{}{} - for !r.IsDelim('}') { - key := r.String() - r.WantColon() - ret[key] = r.Interface() - r.WantComma() - } - r.Delim('}') - - if r.Ok() { - return ret - } else { - return nil - } - } else if r.token.delimValue == '[' { - r.consume() - - ret := []interface{}{} - for !r.IsDelim(']') { - ret = append(ret, r.Interface()) - r.WantComma() - } - r.Delim(']') - - if r.Ok() { - return ret - } else { - return nil - } - } - r.errSyntax() - return nil -} - -// WantComma requires a comma to be present before fetching next token. -func (r *Lexer) WantComma() { - r.wantSep = ',' - r.firstElement = false -} - -// WantColon requires a colon to be present before fetching next token. 
-func (r *Lexer) WantColon() { - r.wantSep = ':' - r.firstElement = false -} - -// CurrentToken returns current token kind if there were no errors and TokenUndef otherwise -func (r *Lexer) CurrentToken() TokenKind { - if r.token.kind == TokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() { - return TokenUndef - } - - return r.token.kind -} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go deleted file mode 100644 index 34b0ade46..000000000 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ /dev/null @@ -1,417 +0,0 @@ -// Package jwriter contains a JSON writer. -package jwriter - -import ( - "io" - "strconv" - "unicode/utf8" - - "github.com/mailru/easyjson/buffer" -) - -// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but -// Flags field in Writer is used to set and pass them around. -type Flags int - -const ( - NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. - NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. -) - -// Writer is a JSON writer. -type Writer struct { - Flags Flags - - Error error - Buffer buffer.Buffer - NoEscapeHTML bool -} - -// Size returns the size of the data that was written out. -func (w *Writer) Size() int { - return w.Buffer.Size() -} - -// DumpTo outputs the data to given io.Writer, resetting the buffer. -func (w *Writer) DumpTo(out io.Writer) (written int, err error) { - return w.Buffer.DumpTo(out) -} - -// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice -// as argument that it will try to reuse. -func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.BuildBytes(reuse...), nil -} - -// ReadCloser returns an io.ReadCloser that can be used to read the data. -// ReadCloser also resets the buffer. -func (w *Writer) ReadCloser() (io.ReadCloser, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.ReadCloser(), nil -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawByte(c byte) { - w.Buffer.AppendByte(c) -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawString(s string) { - w.Buffer.AppendString(s) -} - -// RawBytesString appends string from bytes to the buffer. -func (w *Writer) RawBytesString(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - default: - w.String(string(data)) - } -} - -// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for -// calling with results of MarshalJSON-like functions. -func (w *Writer) Raw(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.Buffer.AppendBytes(data) - default: - w.RawString("null") - } -} - -// RawText encloses raw binary data in quotes and appends in to the buffer. -// Useful for calling with results of MarshalText-like functions. 
-func (w *Writer) RawText(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.String(string(data)) - default: - w.RawString("null") - } -} - -// Base64Bytes appends data to the buffer after base64 encoding it -func (w *Writer) Base64Bytes(data []byte) { - if data == nil { - w.Buffer.AppendString("null") - return - } - w.Buffer.AppendByte('"') - w.base64(data) - w.Buffer.AppendByte('"') -} - -func (w *Writer) Uint8(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint16(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint32(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint64(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Int8(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int16(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int32(n int32) { - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int64(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Uint8Str(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint16Str(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint32Str(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) UintStr(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint64Str(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) UintptrStr(n uintptr) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int8Str(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int16Str(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int32Str(n int32) 
{ - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) IntStr(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int64Str(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float32(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) -} - -func (w *Writer) Float32Str(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float64(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) -} - -func (w *Writer) Float64Str(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Bool(v bool) { - w.Buffer.EnsureSpace(5) - if v { - w.Buffer.Buf = append(w.Buffer.Buf, "true"...) - } else { - w.Buffer.Buf = append(w.Buffer.Buf, "false"...) - } -} - -const chars = "0123456789abcdef" - -func getTable(falseValues ...int) [128]bool { - table := [128]bool{} - - for i := 0; i < 128; i++ { - table[i] = true - } - - for _, v := range falseValues { - table[v] = false - } - - return table -} - -var ( - htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') - htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') -) - -func (w *Writer) String(s string) { - w.Buffer.AppendByte('"') - - // Portions of the string that contain no escapes are appended as - // byte slices. 
- - p := 0 // last non-escape symbol - - escapeTable := &htmlEscapeTable - if w.NoEscapeHTML { - escapeTable = &htmlNoEscapeTable - } - - for i := 0; i < len(s); { - c := s[i] - - if c < utf8.RuneSelf { - if escapeTable[c] { - // single-width character, no escaping is required - i++ - continue - } - - w.Buffer.AppendString(s[p:i]) - switch c { - case '\t': - w.Buffer.AppendString(`\t`) - case '\r': - w.Buffer.AppendString(`\r`) - case '\n': - w.Buffer.AppendString(`\n`) - case '\\': - w.Buffer.AppendString(`\\`) - case '"': - w.Buffer.AppendString(`\"`) - default: - w.Buffer.AppendString(`\u00`) - w.Buffer.AppendByte(chars[c>>4]) - w.Buffer.AppendByte(chars[c&0xf]) - } - - i++ - p = i - continue - } - - // broken utf - runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) - if runeValue == utf8.RuneError && runeWidth == 1 { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\ufffd`) - i++ - p = i - continue - } - - // jsonp stuff - tab separator and line separator - if runeValue == '\u2028' || runeValue == '\u2029' { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\u202`) - w.Buffer.AppendByte(chars[runeValue&0xf]) - i += runeWidth - p = i - continue - } - i += runeWidth - } - w.Buffer.AppendString(s[p:]) - w.Buffer.AppendByte('"') -} - -const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" -const padChar = '=' - -func (w *Writer) base64(in []byte) { - - if len(in) == 0 { - return - } - - w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) - - si := 0 - n := (len(in) / 3) * 3 - - for si < n { - // Convert 3x 8bit source bytes into 4 bytes - val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) - - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F]) - - si += 3 - } - - remain := len(in) - si - if remain == 0 { - return - } - - // Add the remaining small block - val := uint(in[si+0]) << 16 - if remain == 2 { - val |= uint(in[si+1]) << 8 - } - - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F]) - - switch remain { - case 2: - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar)) - case 1: - w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar)) - } -} diff --git a/vendor/github.com/mailru/easyjson/raw.go b/vendor/github.com/mailru/easyjson/raw.go deleted file mode 100644 index ee7367ae5..000000000 --- a/vendor/github.com/mailru/easyjson/raw.go +++ /dev/null @@ -1,46 +0,0 @@ -package easyjson - -import ( - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// RawMessage is a raw piece of JSON (number, string, bool, object, array or -// null) that is extracted without parsing and output as is during marshaling. -type RawMessage []byte - -// MarshalEasyJSON does JSON marshaling using easyjson interface. -func (v *RawMessage) MarshalEasyJSON(w *jwriter.Writer) { - if len(*v) == 0 { - w.RawString("null") - } else { - w.Raw(*v, nil) - } -} - -// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. -func (v *RawMessage) UnmarshalEasyJSON(l *jlexer.Lexer) { - *v = RawMessage(l.Raw()) -} - -// UnmarshalJSON implements encoding/json.Unmarshaler interface. -func (v *RawMessage) UnmarshalJSON(data []byte) error { - *v = make([]byte, len(data)) - copy(*v, data) - return nil -} - -var nullBytes = []byte("null") - -// MarshalJSON implements encoding/json.Marshaler interface. 
-func (v RawMessage) MarshalJSON() ([]byte, error) {
-	if len(v) == 0 {
-		return nullBytes, nil
-	}
-	return v, nil
-}
-
-// IsDefined is required for integration with omitempty easyjson logic.
-func (v *RawMessage) IsDefined() bool {
-	return len(*v) > 0
-}
diff --git a/vendor/github.com/mailru/easyjson/unknown_fields.go b/vendor/github.com/mailru/easyjson/unknown_fields.go
deleted file mode 100644
index 55538eac9..000000000
--- a/vendor/github.com/mailru/easyjson/unknown_fields.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package easyjson
-
-import (
-	jlexer "github.com/mailru/easyjson/jlexer"
-	"github.com/mailru/easyjson/jwriter"
-)
-
-// UnknownFieldsProxy implements UnknownsUnmarshaler and UnknownsMarshaler.
-// Use it as an embedded field in your structure to parse and then serialize unknown struct fields.
type UnknownFieldsProxy struct {
-	unknownFields map[string][]byte
-}
-
-func (s *UnknownFieldsProxy) UnmarshalUnknown(in *jlexer.Lexer, key string) {
-	if s.unknownFields == nil {
-		s.unknownFields = make(map[string][]byte, 1)
-	}
-	s.unknownFields[key] = in.Raw()
-}
-
-func (s UnknownFieldsProxy) MarshalUnknowns(out *jwriter.Writer, first bool) {
-	for key, val := range s.unknownFields {
-		if first {
-			first = false
-		} else {
-			out.RawByte(',')
-		}
-		out.String(string(key))
-		out.RawByte(':')
-		out.Raw(val, nil)
-	}
-}
diff --git a/vendor/github.com/operator-framework/api/pkg/lib/release/release.go b/vendor/github.com/operator-framework/api/pkg/lib/release/release.go
new file mode 100644
index 000000000..e91adf4c3
--- /dev/null
+++ b/vendor/github.com/operator-framework/api/pkg/lib/release/release.go
@@ -0,0 +1,73 @@
+package release
+
+import (
+	"encoding/json"
+	"slices"
+	"strings"
+
+	semver "github.com/blang/semver/v4"
+)
+
+// +k8s:openapi-gen=true
+// OperatorRelease is a wrapper around a slice of semver.PRVersion which supports correct
+// marshaling to YAML and JSON.
+// +kubebuilder:validation:Type=string
+// +kubebuilder:validation:MaxLength=20
+// +kubebuilder:validation:XValidation:rule="self.matches('^[0-9A-Za-z-]+(\\\\.[0-9A-Za-z-]+)*$')",message="release version must be composed of dot-separated identifiers containing only alphanumerics and hyphens"
+// +kubebuilder:validation:XValidation:rule="!self.split('.').exists(x, x.matches('^0[0-9]+$'))",message="numeric identifiers in release version must not have leading zeros"
+type OperatorRelease struct {
+	Release []semver.PRVersion `json:"-"`
+}
+
+// DeepCopyInto creates a deep copy of the Release value.
+func (v *OperatorRelease) DeepCopyInto(out *OperatorRelease) {
+	out.Release = slices.Clone(v.Release)
+}
+
+// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v OperatorRelease) MarshalJSON() ([]byte, error) {
+	segments := []string{}
+	for _, segment := range v.Release {
+		segments = append(segments, segment.String())
+	}
+	return json.Marshal(strings.Join(segments, "."))
+}
+
+// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
+func (v *OperatorRelease) UnmarshalJSON(data []byte) (err error) {
+	var versionString string
+
+	if err = json.Unmarshal(data, &versionString); err != nil {
+		return
+	}
+
+	segments := strings.Split(versionString, ".")
+	for _, segment := range segments {
+		release, err := semver.NewPRVersion(segment)
+		if err != nil {
+			return err
+		}
+		v.Release = append(v.Release, release)
+	}
+
+	return nil
+}
+
+// OpenAPISchemaType is used by the kube-openapi generator when constructing
+// the OpenAPI spec of this type.
+// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ OperatorRelease) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// "semver" is not a standard openapi format but tooling may use the value regardless +func (_ OperatorRelease) OpenAPISchemaFormat() string { return "semver" } + +func (r OperatorRelease) String() string { + segments := []string{} + for _, segment := range r.Release { + segments = append(segments, segment.String()) + } + return strings.Join(segments, ".") +} diff --git a/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go b/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go index 397190a6a..6ed1e4ddf 100644 --- a/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go +++ b/vendor/github.com/operator-framework/api/pkg/manifests/bundleloader.go @@ -39,6 +39,7 @@ func (b *bundleLoader) LoadBundle() error { errs = append(errs, b.calculateCompressedBundleSize()) b.addChannelsFromAnnotationsFile() + b.addPackageFromAnnotationsFile() if !b.foundCSV { errs = append(errs, fmt.Errorf("unable to find a csv in bundle directory %s", b.dir)) @@ -68,6 +69,14 @@ func (b *bundleLoader) addChannelsFromAnnotationsFile() { } } +func (b *bundleLoader) addPackageFromAnnotationsFile() { + if b.bundle == nil { + // None of this is relevant if the bundle was not found + return + } + b.bundle.Package = b.annotationsFile.Annotations.PackageName +} + // Compress the bundle to check its size func (b *bundleLoader) calculateCompressedBundleSize() error { if b.bundle == nil { diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go index 3e6d32480..1efb4323c 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go @@ -13,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" + "github.com/operator-framework/api/pkg/lib/release" "github.com/operator-framework/api/pkg/lib/version" ) @@ -274,8 +275,25 @@ type APIServiceDefinitions struct { // that can manage apps for a given version. // +k8s:openapi-gen=true type ClusterServiceVersionSpec struct { - InstallStrategy NamedInstallStrategy `json:"install"` - Version version.OperatorVersion `json:"version,omitempty"` + InstallStrategy NamedInstallStrategy `json:"install"` + Version version.OperatorVersion `json:"version,omitempty"` + // release specifies the packaging version of the operator, defaulting to empty + // release is optional + // + // A ClusterServiceVersion's release field is used to distinguish between different builds of the same operator version + // This is useful for operators that need to make changes to the CSV which don't affect their functionality, + // for example: + // - to fix a typo in their description + // - to add/amend annotations or labels + // - to amend examples or documentation + // - to produce different builds for different environments + // + // It is up to operator authors to determine the semantics of release versions they use + // for their operator. 
All release versions must conform to the semver prerelease format + // (dot-separated identifiers containing only alphanumerics and hyphens) and are limited + // to a maximum length of 20 characters. + // +optional + Release release.OperatorRelease `json:"release,omitzero"` Maturity string `json:"maturity,omitempty"` CustomResourceDefinitions CustomResourceDefinitions `json:"customresourcedefinitions,omitempty"` APIServiceDefinitions APIServiceDefinitions `json:"apiservicedefinitions,omitempty"` @@ -595,6 +613,7 @@ type ResourceInstance struct { // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Display",type=string,JSONPath=`.spec.displayName`,description="The name of the CSV" // +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`,description="The version of the CSV" +// +kubebuilder:printcolumn:name="Release",type=string,JSONPath=`.spec.release`,description="The release of this version of the CSV" // +kubebuilder:printcolumn:name="Replaces",type=string,JSONPath=`.spec.replaces`,description="The name of a CSV that this one replaces" // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go index 684a7432a..685fa26a3 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go @@ -501,6 +501,7 @@ func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec *out = *in in.InstallStrategy.DeepCopyInto(&out.InstallStrategy) in.Version.DeepCopyInto(&out.Version) + in.Release.DeepCopyInto(&out.Release) in.CustomResourceDefinitions.DeepCopyInto(&out.CustomResourceDefinitions) in.APIServiceDefinitions.DeepCopyInto(&out.APIServiceDefinitions) if in.WebhookDefinitions != nil { diff --git a/vendor/github.com/operator-framework/api/pkg/validation/internal/bundle.go b/vendor/github.com/operator-framework/api/pkg/validation/internal/bundle.go index 9bb90a04d..c5f7ba18d 100644 --- a/vendor/github.com/operator-framework/api/pkg/validation/internal/bundle.go +++ b/vendor/github.com/operator-framework/api/pkg/validation/internal/bundle.go @@ -42,9 +42,26 @@ func validateBundle(bundle *manifests.Bundle) (result errors.ManifestResult) { if sizeErrors != nil { result.Add(sizeErrors...) } + nameErrors := validateBundleName(bundle) + if nameErrors != nil { + result.Add(nameErrors...) 
+ } return result } +func validateBundleName(bundle *manifests.Bundle) []errors.Error { + var errs []errors.Error + // bundle naming with a specified release version must follow the pattern + // <package-name>-v<version>-<release> + if len(bundle.CSV.Spec.Release.Release) > 0 { + expectedName := fmt.Sprintf("%s-v%s-%s", bundle.Package, bundle.CSV.Spec.Version.String(), bundle.CSV.Spec.Release.String()) + if bundle.Name != expectedName { + errs = append(errs, errors.ErrInvalidBundle(fmt.Sprintf("bundle name with release versioning %q does not match expected name %q", bundle.Name, expectedName), bundle.Name)) + } + } + return errs +} + func validateServiceAccounts(bundle *manifests.Bundle) []errors.Error { // get service account names defined in the csv saNamesFromCSV := make(map[string]struct{}, 0) diff --git a/vendor/github.com/operator-framework/api/pkg/validation/internal/typecheck.go b/vendor/github.com/operator-framework/api/pkg/validation/internal/typecheck.go index 197ef5de8..c5eafd454 100644 --- a/vendor/github.com/operator-framework/api/pkg/validation/internal/typecheck.go +++ b/vendor/github.com/operator-framework/api/pkg/validation/internal/typecheck.go @@ -28,7 +28,7 @@ func checkEmptyFields(result *errors.ManifestResult, v reflect.Value, parentStru // Omitted field tags will contain ",omitempty", and ignored tags will // match "-" exactly, respectively. - isOptionalField := strings.Contains(tag, ",omitempty") || tag == "-" + isOptionalField := strings.Contains(tag, ",omitempty") || strings.Contains(tag, ",omitzero") || tag == "-" emptyVal := fieldValue.IsZero() newParentStructName := fieldType.Name diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml index 3c3bf910f..23ecd4505 100644 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,7 +1,9 @@ version: "2" linters: enable: + - errorlint - forbidigo + - gocritic - godot - misspell - revive @@ -11,6 +13,20 @@ linters: forbid: - pattern: ^fmt\.Print.*$ msg: Do not commit print statements. + gocritic: + enable-all: true + disabled-checks: + - commentFormatting + - commentedOutCode + - deferInLoop + - filepathJoin + - hugeParam + - importShadow + - paramTypeCombine + - rangeValCopy + - tooManyResultsChecker + - unnamedResult + - whyNoLint godot: exclude: # Ignore "See: URL". @@ -19,16 +35,12 @@ misspell: locale: US exclusions: - generated: lax presets: - comments - common-false-positives - legacy - std-error-handling - paths: - - third_party$ - - builtin$ - - examples$ + warn-unused: true formatters: enable: - gofmt @@ -37,9 +49,3 @@ goimports: local-prefixes: - github.com/prometheus/procfs - exclusions: - generated: lax - paths: - - third_party$ - - builtin$ - - examples$ diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile index 7edfe4d09..bce50a19c 100644 --- a/vendor/github.com/prometheus/procfs/Makefile +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -1,4 +1,4 @@ -# Copyright 2018 The Prometheus Authors +# Copyright The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
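// Illustrative sketch, not part of the vendored diff: how a CSV release string is
// parsed (mirroring OperatorRelease.UnmarshalJSON above) and how validateBundleName
// composes the expected bundle name. The package name, version, and release value
// below are hypothetical.
package main

import (
	"fmt"
	"strings"

	semver "github.com/blang/semver/v4"
)

func main() {
	// Each dot-separated segment must be a valid semver prerelease identifier
	// (alphanumerics and hyphens, no leading zeros on numeric identifiers).
	raw := "1.build-2" // hypothetical release value
	segments := []string{}
	for _, s := range strings.Split(raw, ".") {
		pr, err := semver.NewPRVersion(s)
		if err != nil {
			panic(err) // would surface as an unmarshal error in the real type
		}
		segments = append(segments, pr.String())
	}
	release := strings.Join(segments, ".")

	// validateBundleName expects <package-name>-v<version>-<release>.
	expected := fmt.Sprintf("%s-v%s-%s", "example-operator", "1.2.3", release)
	fmt.Println(expected) // example-operator-v1.2.3-1.build-2
}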
# You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 4de21512f..6f61bec48 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -139,7 +139,7 @@ common-deps: update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get -d $$m; \ + $(GO) get $$m; \ done $(GO) mod tidy diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go index 2e5334415..716bdef10 100644 --- a/vendor/github.com/prometheus/procfs/arp.go +++ b/vendor/github.com/prometheus/procfs/arp.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -73,15 +73,16 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { columns := strings.Fields(line) width := len(columns) - if width == expectedHeaderWidth || width == 0 { + switch width { + case expectedHeaderWidth, 0: continue - } else if width == expectedDataWidth { + case expectedDataWidth: entry, err := parseARPEntry(columns) if err != nil { return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) - } else { + default: return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go index 838075009..53243e687 100644 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -1,4 +1,4 @@ -// Copyright 2017 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -64,14 +64,12 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { if bucketCount == -1 { bucketCount = arraySize - } else { - if bucketCount != arraySize { - return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) - } + } else if bucketCount != arraySize { + return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) } sizes := make([]float64, arraySize) - for i := 0; i < arraySize; i++ { + for i := range arraySize { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go index bf4f3b48c..4f1cac1f0 100644 --- a/vendor/github.com/prometheus/procfs/cmdline.go +++ b/vendor/github.com/prometheus/procfs/cmdline.go @@ -1,4 +1,4 @@ -// Copyright 2021 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index f0950bb49..5fe6cecd3 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 64cfd534c..8f155551e 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go index d88442f0e..e81a5db94 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index c11207f3a..4be2b1cc5 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go index a6b2b3127..e713bae8d 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 003bc2ad4..0825aa1a8 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go index 1c9b7313b..496770b05 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go index fa3686bc0..b3228ce3d 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go index a0ef55562..575eb022e 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 5f2a37a78..e4a5876ea 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go index f9d961e44..26bfea071 100644 --- a/vendor/github.com/prometheus/procfs/doc.go +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -1,4 +1,4 @@ -// Copyright 2014 Prometheus Team +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go index 9bdaccc7c..8f27912a1 100644 --- a/vendor/github.com/prometheus/procfs/fs.go +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 1b5bdbdf8..3c53023c5 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index 80df79c31..80fce4847 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index 7db863307..9dde85707 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -388,20 +388,21 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { } } case "CacheOp:": - if strings.Split(fields[1], "=")[0] == "alo" { + switch strings.Split(fields[1], "=")[0] { + case "alo": err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) if err != nil { return &m, err } - } else if strings.Split(fields[1], "=")[0] == "inv" { + case "inv": err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, &m.CacheopSyncCacheInProgress) if err != nil { return &m, err } - } else { + default: err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 3a43e8391..e7ccad66b 100644 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go index 5a7d2df06..30c587201 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go index 71b7a70eb..0e41f71af 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index d5404a6d7..8318d8dfd 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go index 1d86f5e63..15bb096ee 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go index fe2355d3c..e0ed671ea 100644 --- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go +++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index bc3a20c93..5374da9fa 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/kernel_hung.go b/vendor/github.com/prometheus/procfs/kernel_hung.go new file mode 100644 index 000000000..539c11151 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/kernel_hung.go @@ -0,0 +1,45 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package procfs + +import ( + "os" + "strconv" + "strings" +) + +// KernelHung contains information about the kernel's hung_task_detect_count number. +type KernelHung struct { + // Indicates the total number of tasks that have been detected as hung since the system boot. + // This file shows up if `CONFIG_DETECT_HUNG_TASK` is enabled. + HungTaskDetectCount *uint64 +} + +// KernelHung returns values from /proc/sys/kernel/hung_task_detect_count. +func (fs FS) KernelHung() (KernelHung, error) { + data, err := os.ReadFile(fs.proc.Path("sys", "kernel", "hung_task_detect_count")) + if err != nil { + return KernelHung{}, err + } + val, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) + if err != nil { + return KernelHung{}, err + } + return KernelHung{ + HungTaskDetectCount: &val, + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go index db88566bd..b66565a10 100644 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ b/vendor/github.com/prometheus/procfs/kernel_random.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 332e76c17..c8c78a65e 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index 1fd4381b2..d66eeda82 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
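// Usage sketch for the new FS.KernelHung accessor above; assumes the standard
// procfs entry point. The counter file only exists on kernels built with
// CONFIG_DETECT_HUNG_TASK, so a read error is expected on other systems.
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // reads from /proc
	if err != nil {
		panic(err)
	}
	kh, err := fs.KernelHung()
	if err != nil {
		// Absent file: kernel lacks CONFIG_DETECT_HUNG_TASK; treat as "no data".
		fmt.Println("hung_task_detect_count unavailable:", err)
		return
	}
	if kh.HungTaskDetectCount != nil {
		fmt.Println("tasks detected as hung since boot:", *kh.HungTaskDetectCount)
	}
}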
// You may obtain a copy of the License at @@ -27,13 +27,34 @@ var ( recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) - componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) + componentDeviceRE = regexp.MustCompile(`(.*)\[(\d+)\](\([SF]+\))?`) + personalitiesPrefix = "Personalities : " ) +type MDStatComponent struct { + // Name of the component device. + Name string + // DescriptorIndex is the descriptor number of the component device, e.g. its order in the superblock. + DescriptorIndex int32 + // Flags per Linux drivers/md/md.[ch] as of v6.12-rc1 + // Subset that is exposed in mdstat + WriteMostly bool + Journal bool + Faulty bool // "Faulty" is what kernel source uses for "(F)" + Spare bool + Replacement bool + // Some additional flags that are NOT exposed in procfs today; they may + // be available via sysfs. + // In_sync, Bitmap_sync, Blocked, WriteErrorSeen, FaultRecorded, + // BlockedBadBlocks, WantReplacement, Candidate, ... +} + // MDStat holds info parsed from /proc/mdstat. type MDStat struct { // Name of the device. Name string + // RAID type of the device. + Type string // activity-state of the device. ActivityState string // Number of active disks. @@ -58,8 +79,8 @@ type MDStat struct { BlocksSyncedFinishTime float64 // current sync speed (in Kilobytes/sec) BlocksSyncedSpeed float64 - // Name of md component devices - Devices []string + // component devices + Devices []MDStatComponent } // MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of @@ -80,28 +101,52 @@ func (fs FS) MDStat() ([]MDStat, error) { // parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of // structs containing the relevant info. func parseMDStat(mdStatData []byte) ([]MDStat, error) { + // TODO: + // - parse global hotspares from the "unused devices" line. mdStats := []MDStat{} lines := strings.Split(string(mdStatData), "\n") + knownRaidTypes := make(map[string]bool) for i, line := range lines { if strings.TrimSpace(line) == "" || line[0] == ' ' || - strings.HasPrefix(line, "Personalities") || strings.HasPrefix(line, "unused") { continue } + // Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] + if len(knownRaidTypes) == 0 && strings.HasPrefix(line, personalitiesPrefix) { + personalities := strings.Fields(line[len(personalitiesPrefix):]) + for _, word := range personalities { + word := word[1 : len(word)-1] + knownRaidTypes[word] = true + } + continue + } deviceFields := strings.Fields(line) if len(deviceFields) < 3 { return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx - state := deviceFields[2] // active or inactive + state := deviceFields[2] // active, inactive, broken + + mdType := "unknown" // raid1, raid5, etc. + var deviceStartIndex int + if len(deviceFields) > 3 { // mdType may be in the 3rd or 4th field + if isRaidType(deviceFields[3], knownRaidTypes) { + mdType = deviceFields[3] + deviceStartIndex = 4 + } else if len(deviceFields) > 4 && isRaidType(deviceFields[4], knownRaidTypes) { + // if the 3rd field is (...), the 4th field is the mdType + mdType = deviceFields[4] + deviceStartIndex = 5 + } + } if len(lines) <= i+3 { return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) } - // Failed disks have the suffix (F) & Spare disks have the suffix (S). + // Failed (Faulty) disks have the suffix (F) & Spare disks have the suffix (S). fail := int64(strings.Count(line, "(F)")) spare := int64(strings.Count(line, "(S)")) active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) @@ -129,13 +174,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Append recovery and resyncing state info. if recovering || resyncing || checking || reshaping { - if recovering { + switch { + case recovering: state = "recovering" - } else if reshaping { + case reshaping: state = "reshaping" - } else if checking { + case checking: state = "checking" - } else { + default: state = "resyncing" } @@ -151,8 +197,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } } + devices, err := evalComponentDevices(deviceFields[deviceStartIndex:]) + if err != nil { + return nil, fmt.Errorf("error parsing components in md device %q: %w", mdName, err) + } + mdStats = append(mdStats, MDStat{ Name: mdName, + Type: mdType, ActivityState: state, DisksActive: active, DisksFailed: fail, @@ -165,14 +217,24 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { BlocksSyncedPct: pct, BlocksSyncedFinishTime: finish, BlocksSyncedSpeed: speed, - Devices: evalComponentDevices(deviceFields), + Devices: devices, }) } return mdStats, nil } +// isRaidType checks if a string's format is like an mdType (RAID personality). +// Rule 1: mdType should not look like (...) +// Rule 2: mdType should not look like sda[0]. +func isRaidType(mdType string, knownRaidTypes map[string]bool) bool { + _, ok := knownRaidTypes[mdType] + return !strings.ContainsAny(mdType, "([") && ok +} + func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { + // e.g. 523968 blocks super 1.2 [4/4] [UUUU] statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) @@ -263,17 +325,29 @@ func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced return blocksSynced, blocksToBeSynced, pct, finish, speed, nil } -func evalComponentDevices(deviceFields []string) []string { - mdComponentDevices := make([]string, 0) - if len(deviceFields) > 3 { - for _, field := range deviceFields[4:] { - match := componentDeviceRE.FindStringSubmatch(field) - if match == nil { - continue - } - mdComponentDevices = append(mdComponentDevices, match[1]) +func evalComponentDevices(deviceFields []string) ([]MDStatComponent, error) { + mdComponentDevices := make([]MDStatComponent, 0) + for _, field := range deviceFields { + match := componentDeviceRE.FindStringSubmatch(field) + if match == nil { + continue + } + descriptorIndex, err := strconv.ParseInt(match[2], 10, 32) + if err != nil { + return mdComponentDevices, fmt.Errorf("error parsing int from device %q: %w", match[2], err) } + mdComponentDevices = append(mdComponentDevices, MDStatComponent{ + Name: match[1], + DescriptorIndex: int32(descriptorIndex), + // match may contain one or more of these + // https://github.com/torvalds/linux/blob/7ec462100ef9142344ddbf86f2c3008b97acddbe/drivers/md/md.c#L8376-L8392 + Faulty: strings.Contains(match[3], "(F)"), + Spare: strings.Contains(match[3], "(S)"), + Journal: strings.Contains(match[3], "(J)"), + Replacement: strings.Contains(match[3], "(R)"), + WriteMostly: strings.Contains(match[3], "(W)"), + }) } - return mdComponentDevices + return mdComponentDevices, nil } diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index 937e1f960..342038318 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++
b/vendor/github.com/prometheus/procfs/meminfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -307,7 +307,7 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { m.ZswapBytes = &valBytes case "Zswapped:": m.Zswapped = &val - m.ZswapBytes = &valBytes + m.ZswappedBytes = &valBytes case "Dirty:": m.Dirty = &val m.DirtyBytes = &valBytes diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index a704c5e73..9414a12f4 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -147,8 +147,7 @@ func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { // mountOptionsParser parses the mount options, superblock options. func mountOptionsParser(mountOptions string) map[string]string { opts := make(map[string]string) - options := strings.Split(mountOptions, ",") - for _, opt := range options { + for opt := range strings.SplitSeq(mountOptions, ",") { splitOption := strings.Split(opt, "=") if len(splitOption) < 2 { key := splitOption[0] @@ -178,3 +177,21 @@ func GetProcMounts(pid int) ([]*MountInfo, error) { } return parseMountInfo(data) } + +// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. +func (fs FS) GetMounts() ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("self/mountinfo")) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} + +// GetProcMounts retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`. +func (fs FS) GetProcMounts(pid int) ([]*MountInfo, error) { + data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%d/mountinfo", pid))) + if err != nil { + return nil, err + } + return parseMountInfo(data) +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 50caa7327..e503cb3a6 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
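// Usage sketch for the two FS-scoped mountinfo helpers added above; they mirror
// the package-level GetMounts/GetProcMounts but honor the FS's mount point.
// PID 1 is just an example target.
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	mounts, err := fs.GetMounts() // parses /proc/self/mountinfo
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		fmt.Printf("%s on %s type %s\n", m.Source, m.MountPoint, m.FSType)
	}
	if _, err := fs.GetProcMounts(1); err != nil { // parses /proc/1/mountinfo
		fmt.Println("mountinfo for pid 1 unavailable:", err)
	}
}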
// You may obtain a copy of the License at @@ -383,7 +383,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e if stats.Opts == nil { stats.Opts = map[string]string{} } - for _, opt := range strings.Split(ss[1], ",") { + for opt := range strings.SplitSeq(ss[1], ",") { split := strings.Split(opt, "=") if len(split) == 2 { stats.Opts[split[0]] = split[1] diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 316df5fbb..e9ca35707 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index e66208aa0..7b3e1d61c 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go index f50b38e35..2a0f60f29 100644 --- a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go +++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -18,6 +18,7 @@ import ( "errors" "io" "os" + "path/filepath" "strconv" "strings" ) @@ -56,7 +57,9 @@ func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { } for _, iFaceFile := range ifaceFiles { - f, err := os.Open(dir + "/" + iFaceFile.Name()) + filePath := filepath.Join(dir, iFaceFile.Name()) + + f, err := os.Open(filePath) if err != nil { return netDevSNMP6, err } diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 19e3378f7..9291f8cd4 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go index 8d4b1ac05..eaa996cbc 100644 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/vendor/github.com/prometheus/procfs/net_protocols.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
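// Aside on the net_dev_snmp6 change above: filepath.Join cleans the joined path,
// so it cannot produce doubled separators the way naive concatenation can.
// A minimal, self-contained illustration (the directory value is hypothetical):
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	dir := "/proc/net/dev_snmp6/" // note the trailing slash
	fmt.Println(dir + "/" + "eth0")         // /proc/net/dev_snmp6//eth0
	fmt.Println(filepath.Join(dir, "eth0")) // /proc/net/dev_snmp6/eth0
}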
// You may obtain a copy of the License at @@ -169,7 +169,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro &pc.EnterMemoryPressure, } - for i := 0; i < len(capabilities); i++ { + for i := range capabilities { switch capabilities[i] { case "y": *capabilityFields[i] = true diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go index deb7029fe..fa3812d9d 100644 --- a/vendor/github.com/prometheus/procfs/net_route.go +++ b/vendor/github.com/prometheus/procfs/net_route.go @@ -1,4 +1,4 @@ -// Copyright 2023 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index fae62b13d..8b221ebff 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -139,9 +139,6 @@ func parseSockstatKVs(kvs []string) (map[string]int, error) { func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { var nsp NetSockstatProtocol for k, v := range kvs { - // Capture the range variable to ensure we get unique pointers for - // each of the optional fields. - v := v switch k { case "inuse": nsp.InUse = v diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index 71c8059f4..4a2dfa18f 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go index 0396d7201..610ea78e5 100644 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/vendor/github.com/prometheus/procfs/net_tcp.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go index 13994c178..b1b3f6a6a 100644 --- a/vendor/github.com/prometheus/procfs/net_tls_stat.go +++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go @@ -1,4 +1,4 @@ -// Copyright 2023 Prometheus Team +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
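// Context for the net_sockstat change above: since Go 1.22 the range variable is
// redeclared on every iteration, so the `v := v` copy that once guaranteed unique
// pointers is redundant. A minimal demonstration (requires Go >= 1.22):
package main

import "fmt"

func main() {
	var ptrs []*int
	for _, v := range []int{1, 2, 3} {
		ptrs = append(ptrs, &v) // per-iteration v: three distinct addresses
	}
	for _, p := range ptrs {
		fmt.Print(*p, " ") // 1 2 3 (under pre-1.22 semantics this printed 3 3 3)
	}
}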
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go index 9ac3daf2d..8a3277910 100644 --- a/vendor/github.com/prometheus/procfs/net_udp.go +++ b/vendor/github.com/prometheus/procfs/net_udp.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index d7e0cacb4..e4d635923 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go index 7c597bc87..69d079445 100644 --- a/vendor/github.com/prometheus/procfs/net_wireless.go +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -1,4 +1,4 @@ -// Copyright 2023 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go index 932ef2046..5a9f497d1 100644 --- a/vendor/github.com/prometheus/procfs/net_xfrm.go +++ b/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -1,4 +1,4 @@ -// Copyright 2017 Prometheus Team +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go index 742dff453..dbdae4739 100644 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ b/vendor/github.com/prometheus/procfs/netstat.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/nfnetlink_queue.go b/vendor/github.com/prometheus/procfs/nfnetlink_queue.go new file mode 100644 index 000000000..b0a73b11e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfnetlink_queue.go @@ -0,0 +1,85 @@ +// Copyright The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
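// A sketch of scanning one line in the whitespace-separated format that
// parseNFNetLinkQueueLine (defined just below) consumes; the sample line is
// fabricated for illustration.
package main

import "fmt"

func main() {
	line := "0 31621 0 2 65531 0 0 32 1"
	var queueID, peerPID, total, copyMode, copyRange, dropped, userDropped, seq, use uint
	if _, err := fmt.Sscanf(line, "%d %d %d %d %d %d %d %d %d",
		&queueID, &peerPID, &total, &copyMode, &copyRange,
		&dropped, &userDropped, &seq, &use); err != nil {
		panic(err)
	}
	fmt.Printf("queue %d handled by pid %d, %d packets awaiting verdict\n", queueID, peerPID, total)
}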
+ +package procfs + +import ( + "bufio" + "bytes" + "fmt" + + "github.com/prometheus/procfs/internal/util" +) + +const nfNetLinkQueueFormat = "%d %d %d %d %d %d %d %d %d" + +// NFNetLinkQueue contains general information about netfilter queues found in /proc/net/netfilter/nfnetlink_queue. +type NFNetLinkQueue struct { + // id of the queue + QueueID uint + // pid of process handling the queue + PeerPID uint + // number of packets waiting for a decision + QueueTotal uint + // indicates how userspace receives packets + CopyMode uint + // size of copy + CopyRange uint + // number of items dropped by the kernel because too many packets were waiting for a decision. + // If queue_total exceeds queue_max_len (1024 by default), the packets are dropped. + QueueDropped uint + // number of packets dropped by userspace (due to kernel send failure on the netlink socket) + QueueUserDropped uint + // sequence number of packets queued. It gives a good approximation of the number of queued packets. + SequenceID uint + // internal value (number of entities using the queue) + Use uint +} + +// NFNetLinkQueue returns information about current state of netfilter queues. +func (fs FS) NFNetLinkQueue() ([]NFNetLinkQueue, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("net/netfilter/nfnetlink_queue")) + if err != nil { + return nil, err + } + + queue := []NFNetLinkQueue{} + if len(data) == 0 { + return queue, nil + } + + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + line := scanner.Text() + nFNetLinkQueue, err := parseNFNetLinkQueueLine(line) + if err != nil { + return nil, err + } + queue = append(queue, *nFNetLinkQueue) + } + return queue, nil +} + +// parseNFNetLinkQueueLine parses each line of the /proc/net/netfilter/nfnetlink_queue file. +func parseNFNetLinkQueueLine(line string) (*NFNetLinkQueue, error) { + nFNetLinkQueue := NFNetLinkQueue{} + _, err := fmt.Sscanf( + line, nfNetLinkQueueFormat, + &nFNetLinkQueue.QueueID, &nFNetLinkQueue.PeerPID, &nFNetLinkQueue.QueueTotal, &nFNetLinkQueue.CopyMode, + &nFNetLinkQueue.CopyRange, &nFNetLinkQueue.QueueDropped, &nFNetLinkQueue.QueueUserDropped, &nFNetLinkQueue.SequenceID, &nFNetLinkQueue.Use, + ) + if err != nil { + return nil, err + } + return &nFNetLinkQueue, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index 368187fa8..39c14aa55 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -49,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } // Self returns a process for the current process read via /proc/self.
func Self() (Proc, error) { fs, err := NewFS(DefaultMountPoint) - if err != nil || errors.Unwrap(err) == ErrMountPoint { + if err != nil || errors.Is(err, ErrMountPoint) { return Proc{}, err } return fs.Self() diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go index 4a64347c0..535c08d6f 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go index 5dd493899..0b275c3b1 100644 --- a/vendor/github.com/prometheus/procfs/proc_cgroups.go +++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -1,4 +1,4 @@ -// Copyright 2021 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -40,13 +40,13 @@ type CgroupSummary struct { // parseCgroupSummary parses each line of the /proc/cgroup file // Line format is `subsys_name hierarchy num_cgroups enabled`. -func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { +func parseCgroupSummaryString(cgroupSummaryStr string) (*CgroupSummary, error) { var err error - fields := strings.Fields(CgroupSummaryStr) + fields := strings.Fields(cgroupSummaryStr) // require at least 4 fields if len(fields) < 4 { - return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) + return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), cgroupSummaryStr) } CgroupSummary := &CgroupSummary{ diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go index 57a89895d..5b941de04 100644 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ b/vendor/github.com/prometheus/procfs/proc_environ.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index fa761b352..fa57761db 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
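// Note on the proc.go change above: errors.Unwrap(err) == ErrMountPoint inspects
// only one level of wrapping, while errors.Is walks the entire chain. A minimal
// illustration with a stand-in sentinel error:
package main

import (
	"errors"
	"fmt"
)

var errMount = errors.New("mount point not found") // stand-in for procfs.ErrMountPoint

func main() {
	wrapped := fmt.Errorf("outer: %w", fmt.Errorf("inner: %w", errMount))
	fmt.Println(errors.Unwrap(wrapped) == errMount) // false: unwraps only once
	fmt.Println(errors.Is(wrapped, errMount))       // true: checks the whole chain
}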
// You may obtain a copy of the License at @@ -60,15 +60,16 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { text = scanner.Text() - if rPos.MatchString(text) { + switch { + case rPos.MatchString(text): pos = rPos.FindStringSubmatch(text)[1] - } else if rFlags.MatchString(text) { + case rFlags.MatchString(text): flags = rFlags.FindStringSubmatch(text)[1] - } else if rMntID.MatchString(text) { + case rMntID.MatchString(text): mntid = rMntID.FindStringSubmatch(text)[1] - } else if rIno.MatchString(text) { + case rIno.MatchString(text): ino = rIno.FindStringSubmatch(text)[1] - } else if rInotify.MatchString(text) { + case rInotify.MatchString(text): newInotify, err := parseInotifyInfo(text) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go index 86b4b4524..b942c5072 100644 --- a/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go index d15b66ddb..dd8086ba2 100644 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index 9530b14bc..4b7d33784 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -19,6 +19,7 @@ import ( "os" "regexp" "strconv" + "strings" ) // ProcLimits represents the soft limits for each of the process's resource @@ -74,7 +75,7 @@ const ( ) var ( - limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) + limitsMatch = regexp.MustCompile(`(Max \w+\s??\w*\s?\w*)\s{2,}(\w+)\s+(\w+)`) ) // NewLimits returns the current soft limits of the process. 
@@ -106,7 +107,7 @@ func (p Proc) Limits() (ProcLimits, error) { return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) } - switch fields[1] { + switch strings.TrimSpace(fields[1]) { case "Max cpu time": l.CPUTime, err = parseUint(fields[2]) case "Max file size": diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go index 7e75c286b..cc519f92f 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go index 4248c1716..7f94cc891 100644 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ b/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index 0f8f847f9..5fc0eb9e2 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index ccd35f153..cc2c5de87 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index 9a297afcf..3e48afd1d 100644 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go index 4bdc90b07..8d9a9bcd6 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go index fb7fd3995..841fef464 100644 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 3328556bd..02e3f9e31 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/proc_statm.go b/vendor/github.com/prometheus/procfs/proc_statm.go index ed5798424..b0a936016 100644 --- a/vendor/github.com/prometheus/procfs/proc_statm.go +++ b/vendor/github.com/prometheus/procfs/proc_statm.go @@ -1,4 +1,4 @@ -// Copyright 2025 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -80,7 +80,7 @@ func (p Proc) Statm() (ProcStatm, error) { func parseStatm(data []byte) ([]uint64, error) { var statmSlice []uint64 statmItems := strings.Fields(string(data)) - for i := 0; i < len(statmItems); i++ { + for i := range statmItems { statmItem, err := strconv.ParseUint(statmItems[i], 10, 64) if err != nil { return nil, err diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index dd8aa5688..1ed2bced4 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -16,7 +16,7 @@ package procfs import ( "bytes" "math/bits" - "sort" + "slices" "strconv" "strings" @@ -94,8 +94,7 @@ func (p Proc) NewStatus() (ProcStatus, error) { s := ProcStatus{PID: p.PID} - lines := strings.Split(string(data), "\n") - for _, line := range lines { + for line := range strings.SplitSeq(string(data), "\n") { if !bytes.Contains([]byte(line), []byte(":")) { continue } @@ -222,7 +221,7 @@ func calcCpusAllowedList(cpuString string) []uint64 { } - sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) + slices.Sort(g) return g } diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 3810d1ac9..52658a4d5 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go index 5f7f32dc8..fafd8dff7 100644 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ b/vendor/github.com/prometheus/procfs/schedstat.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go index 8611c9017..32a04678a 100644 --- a/vendor/github.com/prometheus/procfs/slab.go +++ b/vendor/github.com/prometheus/procfs/slab.go @@ -1,4 +1,4 @@ -// Copyright 2020 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index 403e6ae70..47b73a729 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index e36b41c18..593ad0f62 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -16,6 +16,7 @@ package procfs import ( "bufio" "bytes" + "errors" "fmt" "io" "strconv" @@ -92,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, &cpuStat.Guest, &cpuStat.GuestNice) - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index 65fec834b..ee17bf488 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index 80e0e947b..0cfbb5418 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go index 51c49d89e..2a8d76390 100644 --- a/vendor/github.com/prometheus/procfs/vm.go +++ b/vendor/github.com/prometheus/procfs/vm.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index e54d94b09..806e17114 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -88,11 +88,9 @@ func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { zoneinfo := []Zoneinfo{} - zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) - for _, block := range zoneinfoBlocks { + for block := range bytes.SplitSeq(zoneinfoData, []byte("\nNode")) { var zoneinfoElement Zoneinfo - lines := strings.Split(string(block), "\n") - for _, line := range lines { + for line := range strings.SplitSeq(string(block), "\n") { if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { zoneinfoElement.Node = nodeZone[1] diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index 6acf8ab1e..104dc2440 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -57,3 +57,10 @@ linters: - common-false-positives - legacy - std-error-handling + settings: + govet: + # Disable buildtag check to allow dual build tag syntax (both //go:build and // +build). + # This is necessary for Go 1.15 compatibility since //go:build was introduced in Go 1.17. + # This can be removed once Cobra requires Go 1.17 or higher. + disable: + - buildtag diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 78088db69..c05fed45a 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -557,7 +557,7 @@ func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { } } -var minUsagePadding = 25 +const minUsagePadding = 25 // UsagePadding return padding for the usage. func (c *Command) UsagePadding() int { @@ -567,7 +567,7 @@ func (c *Command) UsagePadding() int { return c.parent.commandsMaxUseLen } -var minCommandPathPadding = 11 +const minCommandPathPadding = 11 // CommandPathPadding return padding for the command path. 
func (c *Command) CommandPathPadding() int { @@ -577,7 +577,7 @@ func (c *Command) CommandPathPadding() int { return c.parent.commandsMaxCommandPathLen } -var minNamePadding = 11 +const minNamePadding = 11 // NamePadding returns padding for the name. func (c *Command) NamePadding() int { @@ -1939,7 +1939,7 @@ type tmplFunc struct { fn func(io.Writer, interface{}) error } -var defaultUsageTemplate = `Usage:{{if .Runnable}} +const defaultUsageTemplate = `Usage:{{if .Runnable}} {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} @@ -2039,7 +2039,7 @@ func defaultUsageFunc(w io.Writer, in interface{}) error { return nil } -var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} +const defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} {{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` @@ -2061,7 +2061,7 @@ func defaultHelpFunc(w io.Writer, in interface{}) error { return nil } -var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +const defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} ` // defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync. diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md index 78dc1f8b0..e9ba83073 100644 --- a/vendor/github.com/stretchr/objx/README.md +++ b/vendor/github.com/stretchr/objx/README.md @@ -1,8 +1,6 @@ # Objx [![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx) [![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx) -[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) -[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) [![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) [![GoDoc](https://pkg.go.dev/badge/github.com/stretchr/objx?utm_source=godoc)](https://pkg.go.dev/github.com/stretchr/objx) @@ -19,49 +17,62 @@ Objx provides the `objx.Map` type, which is a `map[string]interface{}` that expo ### Pattern Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: - m, err := objx.FromJSON(json) +```go +m, err := objx.FromJSON(json) +``` NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. Use `Get` to access the value you're interested in. You can use dot and array notation too: - m.Get("places[0].latlng") +```go +m.Get("places[0].latlng") +``` Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - if m.Get("code").IsStr() { // Your code... } +```go +if m.Get("code").IsStr() { // Your code... 
} +``` Or you can just assume the type, and use one of the strong type methods to extract the real value: - m.Get("code").Int() +```go +m.Get("code").Int() +``` If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. - Get("code").Int(-1) - +```go +Get("code").Int(-1) +``` If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. ### Reading data A simple example of how to use Objx: - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) +```go +// Use MustFromJSON to make an objx.Map from some JSON +m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() +// Get the details +name := m.Get("name").Str() +age := m.Get("age").Int() - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) +// Get their nickname (or use their name if they don't have one) +nickname := m.Get("nickname").Str(name) +``` ### Ranging Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... - } +```go +m := objx.MustFromJSON(json) +for key, value := range m { + // Your code... +} +``` ## Installation To install Objx, use go get: diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 8409b5f8f..8236c995a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -223,7 +223,7 @@ func (c *client) exportContext(parent context.Context) (context.Context, context ) if c.exportTimeout > 0 { - ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + ctx, cancel = context.WithTimeoutCause(parent, c.exportTimeout, errors.New("exporter export timeout")) } else { ctx, cancel = context.WithCancel(parent) } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 506ca00b6..4f47117a5 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -92,12 +92,11 @@ func NewHTTPConfig(opts ...HTTPOption) Config { return cfg } -// cleanPath returns a path with all spaces trimmed and all redundancies -// removed. If urlPath is empty or cleaning it results in an empty string, +// cleanPath returns a path with all spaces trimmed. If urlPath is empty, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { - tmp := path.Clean(strings.TrimSpace(urlPath)) - if tmp == "." { + tmp := strings.TrimSpace(urlPath) + if tmp == "" || tmp == "." 
{ return defaultPath } if !path.IsAbs(tmp) { diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go index 777e68a7b..259a898ae 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go @@ -132,7 +132,7 @@ func wait(ctx context.Context, delay time.Duration) error { select { case <-timer.C: default: - return ctx.Err() + return context.Cause(ctx) } case <-timer.C: } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 5f78bfdfb..ed2ddce71 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.36.0" + return "1.37.0" } diff --git a/vendor/golang.org/x/mod/modfile/print.go b/vendor/golang.org/x/mod/modfile/print.go index 2a0123d4b..48dbd82ae 100644 --- a/vendor/golang.org/x/mod/modfile/print.go +++ b/vendor/golang.org/x/mod/modfile/print.go @@ -33,7 +33,7 @@ type printer struct { } // printf prints to the buffer. -func (p *printer) printf(format string, args ...interface{}) { +func (p *printer) printf(format string, args ...any) { fmt.Fprintf(p, format, args...) } diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index f58de029e..504a2f1df 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -600,7 +600,7 @@ func (in *input) readToken() { // Checked all punctuation. Must be identifier token. if c := in.peekRune(); !isIdent(c) { - in.Error(fmt.Sprintf("unexpected input character %#q", c)) + in.Error(fmt.Sprintf("unexpected input character %#q", rune(c))) } // Scan over identifier. 
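The two OTLP exporter changes nearby are one pattern split across files: `client.go` now attaches a cause to the export deadline via `context.WithTimeoutCause`, and `retry.go` surfaces it with `context.Cause` instead of `ctx.Err()`, so a timed-out export reports "exporter export timeout" rather than a bare `context.DeadlineExceeded`. A self-contained sketch of the standard-library mechanics (Go 1.21+; illustrative, not the exporter's code):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	// Attach a descriptive cause to the deadline, as client.go now does.
	ctx, cancel := context.WithTimeoutCause(context.Background(),
		10*time.Millisecond, errors.New("exporter export timeout"))
	defer cancel()

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context deadline exceeded
	fmt.Println(context.Cause(ctx)) // exporter export timeout
}
```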
diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index a86ee4fd8..c5b8305de 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -368,7 +368,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a Err: err, }) } - errorf := func(format string, args ...interface{}) { + errorf := func(format string, args ...any) { wrapError(fmt.Errorf(format, args...)) } @@ -574,7 +574,7 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V Err: err, } } - errorf := func(format string, args ...interface{}) *Error { + errorf := func(format string, args ...any) *Error { return wrapError(fmt.Errorf(format, args...)) } @@ -685,7 +685,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, Err: err, }) } - errorf := func(format string, args ...interface{}) { + errorf := func(format string, args ...any) { wrapError(fmt.Errorf(format, args...)) } @@ -1594,7 +1594,7 @@ func (f *File) AddRetract(vi VersionInterval, rationale string) error { r.Syntax = f.Syntax.addLine(nil, "retract", "[", AutoQuote(vi.Low), ",", AutoQuote(vi.High), "]") } if rationale != "" { - for _, line := range strings.Split(rationale, "\n") { + for line := range strings.SplitSeq(rationale, "\n") { com := Comment{Token: "// " + line} r.Syntax.Comment().Before = append(r.Syntax.Comment().Before, com) } diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 9d3955bd7..739c13f48 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -802,8 +802,8 @@ func MatchPrefixPatterns(globs, target string) bool { for globs != "" { // Extract next non-empty glob in comma-separated list. var glob string - if i := strings.Index(globs, ","); i >= 0 { - glob, globs = globs[:i], globs[i+1:] + if before, after, ok := strings.Cut(globs, ","); ok { + glob, globs = before, after } else { glob, globs = globs, "" } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 1965913e5..ccb87e6da 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -376,11 +376,24 @@ type ClientConn struct { // completely unresponsive connection. pendingResets int + // readBeforeStreamID is the smallest stream ID that has not been followed by + // a frame read from the peer. We use this to determine when a request may + // have been sent to a completely unresponsive connection: + // If the request ID is less than readBeforeStreamID, then we have had some + // indication of life on the connection since sending the request. + readBeforeStreamID uint32 + // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. // Lock reqmu BEFORE mu or wmu. reqHeaderMu chan struct{} + // internalStateHook reports state changes back to the net/http.ClientConn. + // Note that this is different from the user state hook registered by + // net/http.ClientConn.SetStateHook: The internal hook calls ClientConn, + // which calls the user hook. + internalStateHook func() + // wmu is held while writing. // Acquire BEFORE mu when holding both, to avoid blocking mu on network writes. // Only acquire both at the same time when changing peer settings. 
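The new `readBeforeStreamID` field above is a watermark: the read-loop hunk further below advances it to `nextStreamID` whenever any frame arrives from the peer, and `cleanupWriteRequest` compares a canceled stream's ID against it to decide whether a RST_STREAM still needs a verification PING. A simplified standalone model of that bookkeeping (the wrapper type and method names here are illustrative, not the http2 package's API):

```go
package main

import "fmt"

// conn models just the two counters from ClientConn that matter here.
type conn struct {
	nextStreamID       uint32 // next client stream ID to hand out (odd, ascending)
	readBeforeStreamID uint32 // watermark: set to nextStreamID on every frame read
}

func (c *conn) openStream() uint32 {
	id := c.nextStreamID
	c.nextStreamID += 2
	return id
}

// onFrameRead mirrors the read loop: any frame from the peer proves the
// connection was alive after every stream below nextStreamID was sent.
func (c *conn) onFrameRead() { c.readBeforeStreamID = c.nextStreamID }

// readSinceStream is the test cleanupWriteRequest now performs before
// deciding whether a canceled request must keep holding a concurrency slot.
func (c *conn) readSinceStream(id uint32) bool { return c.readBeforeStreamID > id }

func main() {
	c := &conn{nextStreamID: 1}
	a := c.openStream()               // stream 1
	c.onFrameRead()                   // peer sent us something
	b := c.openStream()               // stream 3
	fmt.Println(c.readSinceStream(a)) // true: life seen since stream 1 was opened
	fmt.Println(c.readSinceStream(b)) // false: nothing read since stream 3 was opened
}
```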
@@ -710,7 +723,7 @@ func canRetryError(err error) bool { func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { if t.transportTestHooks != nil { - return t.newClientConn(nil, singleUse) + return t.newClientConn(nil, singleUse, nil) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -720,7 +733,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -772,10 +785,10 @@ func (t *Transport) expectContinueTimeout() time.Duration { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, internalStateHook func()) (*ClientConn, error) { conf := configFromTransport(t) cc := &ClientConn{ t: t, @@ -797,6 +810,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), lastActive: time.Now(), + internalStateHook: internalStateHook, } if t.transportTestHooks != nil { t.transportTestHooks.newclientconn(cc) @@ -1037,10 +1051,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } - st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && - !cc.doNotReuse && - int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && - !cc.tooIdleLocked() + st.canTakeNewRequest = maxConcurrentOkay && cc.isUsableLocked() // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). @@ -1056,6 +1067,31 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } +func (cc *ClientConn) isUsableLocked() bool { + return cc.goAway == nil && + !cc.closed && + !cc.closing && + !cc.doNotReuse && + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && + !cc.tooIdleLocked() +} + +// canReserveLocked reports whether a net/http.ClientConn can reserve a slot on this conn. +// +// This follows slightly different rules than clientConnIdleState.canTakeNewRequest. +// We only permit reservations up to the conn's concurrency limit. +// This differs from ClientConn.ReserveNewRequest, which permits reservations +// past the limit when StrictMaxConcurrentStreams is set. +func (cc *ClientConn) canReserveLocked() bool { + if cc.currentRequestCountLocked() >= int(cc.maxConcurrentStreams) { + return false + } + if !cc.isUsableLocked() { + return false + } + return true +} + // currentRequestCountLocked reports the number of concurrency slots currently in use, // including active streams, reserved slots, and reset streams waiting for acknowledgement. func (cc *ClientConn) currentRequestCountLocked() int { @@ -1067,6 +1103,14 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool { return st.canTakeNewRequest } +// availableLocked reports the number of concurrency slots available. 
+func (cc *ClientConn) availableLocked() int { + if !cc.canTakeNewRequestLocked() { + return 0 + } + return max(0, int(cc.maxConcurrentStreams)-cc.currentRequestCountLocked()) +} + // tooIdleLocked reports whether this connection has been been sitting idle // for too much wall time. func (cc *ClientConn) tooIdleLocked() bool { @@ -1091,6 +1135,7 @@ func (cc *ClientConn) closeConn() { t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn) defer t.Stop() cc.tconn.Close() + cc.maybeCallStateHook() } // A tls.Conn.Close can hang for a long time if the peer is unresponsive. @@ -1616,6 +1661,8 @@ func (cs *clientStream) cleanupWriteRequest(err error) { } bodyClosed := cs.reqBodyClosed closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil + // Have we read any frames from the connection since sending this request? + readSinceStream := cc.readBeforeStreamID > cs.ID cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1647,8 +1694,10 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // // This could be due to the server becoming unresponsive. // To avoid sending too many requests on a dead connection, - // we let the request continue to consume a concurrency slot - // until we can confirm the server is still responding. + // if we haven't read any frames from the connection since + // sending this request, we let it continue to consume + // a concurrency slot until we can confirm the server is + // still responding. // We do this by sending a PING frame along with the RST_STREAM // (unless a ping is already in flight). // @@ -1659,7 +1708,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // because it's short lived and will probably be closed before // we get the ping response. ping := false - if !closeOnIdle { + if !closeOnIdle && !readSinceStream { cc.mu.Lock() // rstStreamPingsBlocked works around a gRPC behavior: // see comment on the field for details. @@ -1693,6 +1742,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { } close(cs.donec) + cc.maybeCallStateHook() } // awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. @@ -2745,6 +2795,7 @@ func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientSt // See comment on ClientConn.rstStreamPingsBlocked for details. rl.cc.rstStreamPingsBlocked = false } + rl.cc.readBeforeStreamID = rl.cc.nextStreamID cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -2795,6 +2846,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() @@ -2975,6 +3027,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { func (rl *clientConnReadLoop) processPing(f *PingFrame) error { if f.IsAck() { cc := rl.cc + defer cc.maybeCallStateHook() cc.mu.Lock() defer cc.mu.Unlock() // If ack, notify listener if any @@ -3198,9 +3251,13 @@ func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err erro } // noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. +// if there's already a cached connection to the host. 
// (The field is exported so it can be accessed via reflect from net/http; tested // by TestNoDialH2RoundTripperType) +// +// A noDialH2RoundTripper is registered with http1.Transport.RegisterProtocol, +// and the http1.Transport can use type assertions to call non-RoundTrip methods on it. +// This lets us expose, for example, NewClientConn to net/http. type noDialH2RoundTripper struct{ *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -3211,6 +3268,85 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err return res, err } +func (rt noDialH2RoundTripper) NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error) { + tr := rt.Transport + cc, err := tr.newClientConn(conn, tr.disableKeepAlives(), internalStateHook) + if err != nil { + return nil, err + } + + // RoundTrip should block when the conn is at its concurrency limit, + // not return an error. Setting strictMaxConcurrentStreams enables this. + cc.strictMaxConcurrentStreams = true + + return netHTTPClientConn{cc}, nil +} + +// netHTTPClientConn wraps ClientConn and implements the interface net/http expects from +// the RoundTripper returned by NewClientConn. +type netHTTPClientConn struct { + cc *ClientConn +} + +func (cc netHTTPClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.cc.RoundTrip(req) +} + +func (cc netHTTPClientConn) Close() error { + return cc.cc.Close() +} + +func (cc netHTTPClientConn) Err() error { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if cc.cc.closed { + return errors.New("connection closed") + } + return nil +} + +func (cc netHTTPClientConn) Reserve() error { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + if !cc.cc.canReserveLocked() { + return errors.New("connection is unavailable") + } + cc.cc.streamsReserved++ + return nil +} + +func (cc netHTTPClientConn) Release() { + defer cc.cc.maybeCallStateHook() + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + // We don't complain if streamsReserved is 0. + // + // This is consistent with RoundTrip: both Release and RoundTrip will + // consume a reservation iff one exists.
+ if cc.cc.streamsReserved > 0 { + cc.cc.streamsReserved-- + } +} + +func (cc netHTTPClientConn) Available() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.availableLocked() +} + +func (cc netHTTPClientConn) InFlight() int { + cc.cc.mu.Lock() + defer cc.cc.mu.Unlock() + return cc.cc.currentRequestCountLocked() +} + +func (cc *ClientConn) maybeCallStateHook() { + if cc.internalStateHook != nil { + cc.internalStateHook() + } +} + func (t *Transport) idleConnTimeout() time.Duration { // to keep things backwards compatible, we use non-zero values of // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go index 3aaffdd1f..c2b3c0098 100644 --- a/vendor/golang.org/x/net/trace/events.go +++ b/vendor/golang.org/x/net/trace/events.go @@ -58,8 +58,8 @@ func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { Buckets: buckets, } - data.Families = make([]string, 0, len(families)) famMu.RLock() + data.Families = make([]string, 0, len(families)) for name := range families { data.Families = append(data.Families, name) } diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 2f45dbc86..f69fd7546 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -144,8 +144,8 @@ func (g *Group) SetLimit(n int) { g.sem = nil return } - if len(g.sem) != 0 { - panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem))) + if active := len(g.sem); active != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active)) } g.sem = make(chan token, n) } diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 34c9ae76e..63541994e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -92,9 +92,6 @@ var ARM64 struct { HasSHA2 bool // SHA2 hardware implementation HasCRC32 bool // CRC32 hardware implementation HasATOMICS bool // Atomic memory operation instruction set - HasHPDS bool // Hierarchical permission disables in translations tables - HasLOR bool // Limited ordering regions - HasPAN bool // Privileged access never HasFPHP bool // Half precision floating-point instruction set HasASIMDHP bool // Advanced SIMD half precision instruction set HasCPUID bool // CPUID identification scheme registers diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index f449c679f..af2aa99f9 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -65,10 +65,10 @@ func setMinimalFeatures() { func readARM64Registers() { Initialized = true - parseARM64SystemRegisters(getisar0(), getisar1(), getmmfr1(), getpfr0()) + parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0()) } -func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) { +func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { // ID_AA64ISAR0_EL1 switch extractBits(isar0, 4, 7) { case 1: @@ -152,22 +152,6 @@ func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) { ARM64.HasI8MM = true } - // ID_AA64MMFR1_EL1 - switch extractBits(mmfr1, 12, 15) { - case 1, 2: - ARM64.HasHPDS = true - } - - switch extractBits(mmfr1, 16, 19) { - case 1: - ARM64.HasLOR = true - } - - switch extractBits(mmfr1, 20, 23) { - case 1, 2, 3: - ARM64.HasPAN = true - } - // 
ID_AA64PFR0_EL1 switch extractBits(pfr0, 16, 19) { case 0: diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index a4f24b3b0..3b0450a06 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -20,13 +20,6 @@ TEXT ·getisar1(SB),NOSPLIT,$0-8 MOVD R0, ret+0(FP) RET -// func getmmfr1() uint64 -TEXT ·getmmfr1(SB),NOSPLIT,$0-8 - // get Memory Model Feature Register 1 into x0 - MRS ID_AA64MMFR1_EL1, R0 - MOVD R0, ret+0(FP) - RET - // func getpfr0() uint64 TEXT ·getpfr0(SB),NOSPLIT,$0-8 // get Processor Feature Register 0 into x0 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index e3fc5a8d3..6ac6e1efb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -8,6 +8,5 @@ package cpu func getisar0() uint64 func getisar1() uint64 -func getmmfr1() uint64 func getpfr0() uint64 func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 8df2079e1..7f1946780 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -8,5 +8,4 @@ package cpu func getisar0() uint64 { return 0 } func getisar1() uint64 { return 0 } -func getmmfr1() uint64 { return 0 } func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go index 19aea0633..ebfb3fc8e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go @@ -167,7 +167,7 @@ func doinit() { setMinimalFeatures() return } - parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64mmfr1, cpuid.aa64pfr0) + parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) Initialized = true } diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go index 87fd3a778..85b64d5cc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go @@ -59,7 +59,7 @@ func doinit() { if !ok { return } - parseARM64SystemRegisters(isar0, isar1, 0, 0) + parseARM64SystemRegisters(isar0, isar1, 0) Initialized = true } diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 42517077c..fd39be4ef 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -256,6 +256,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -613,7 +614,7 @@ ccflags="$@" $2 !~ /IOC_MAGIC/ && $2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || - $2 ~ /^IOCTL_VM_SOCKETS_/ || + $2 ~ /^(IOCTL_VM_SOCKETS_|IOCTL_MEI_)/ || $2 ~ /^(TASKSTATS|TS)_/ || $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index d0a75da57..120a7b35d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1615,6 +1615,8 @@ const ( IN_OPEN = 0x20 IN_Q_OVERFLOW = 0x4000 IN_UNMOUNT = 0x2000 + IOCTL_MEI_CONNECT_CLIENT = 0xc0104801 + IOCTL_MEI_CONNECT_CLIENT_VTAG = 0xc0144804 IPPROTO_AH = 0x33 IPPROTO_BEETPH = 0x5e IPPROTO_COMP = 0x6c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 
1c37f9fbc..97a61fc5b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 6f54d34ae..a0d6d498c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 783ec5c12..dd9c903f9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ca83d3ba1..384c61ca3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -120,6 +120,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 607e611c0..6384c9831 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index b9cb5bd3c..553c1c6f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 65b078a63..b3339f209 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff 
IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5298a3033..177091d2b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7bc557c87..c5abf156d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 152399bb0..f1f3fadf5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 1a1ce2409..203ad9c54 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 4231a1fb5..4b9abcb21 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 21c0e9526..f87983037 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xffffff0f IPV6_FLOWLABEL_MASK = 0xffff0f00 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index f00d1cd7c..64347eb35 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IEXTEN = 
0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 + IOCTL_MEI_NOTIFY_GET = 0x80044803 + IOCTL_MEI_NOTIFY_SET = 0x40044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index bc8d539e6..7d7191171 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 + IOCTL_MEI_NOTIFY_GET = 0x40044803 + IOCTL_MEI_NOTIFY_SET = 0x80044802 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 IPV6_FLOWINFO_MASK = 0xfffffff IPV6_FLOWLABEL_MASK = 0xfffff diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 439548ec9..50e8e6449 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -104,7 +104,7 @@ type Statvfs_t struct { Fsid uint32 Namemax uint32 Owner uint32 - Spare [4]uint32 + Spare [4]uint64 Fstypename [32]byte Mntonname [1024]byte Mntfromname [1024]byte diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go index dd99ad14d..ce28c9062 100644 --- a/vendor/golang.org/x/text/encoding/unicode/unicode.go +++ b/vendor/golang.org/x/text/encoding/unicode/unicode.go @@ -60,9 +60,9 @@ func (utf8bomEncoding) NewDecoder() *encoding.Decoder { } var utf8enc = &internal.Encoding{ - &internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()}, - "UTF-8", - identifier.UTF8, + Encoding: &internal.SimpleEncoding{Decoder: utf8Decoder{}, Encoder: runes.ReplaceIllFormed()}, + Name: "UTF-8", + MIB: identifier.UTF8, } type utf8bomDecoder struct { diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 794b2e32b..563270c15 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -195,7 +195,7 @@ func (r *Reservation) CancelAt(t time.Time) { // update state r.lim.last = t r.lim.tokens = tokens - if r.timeToAct == r.lim.lastEvent { + if r.timeToAct.Equal(r.lim.lastEvent) { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go index 0180a341e..bc15ef8b9 100644 --- a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -27,6 +27,7 @@ package unitchecker // printf checker. import ( + "archive/zip" "encoding/gob" "encoding/json" "flag" @@ -74,6 +75,7 @@ type Config struct { VetxOnly bool // run analysis only for facts, not diagnostics VetxOutput string // where to write file of fact information Stdout string // write stdout (e.g. JSON, unified diff) to this file + FixArchive string // write fixed files to this zip archive, if non-empty SucceedOnTypecheckFailure bool // obsolete awful hack; see #18395 and below } @@ -153,7 +155,7 @@ func Run(configFile string, analyzers []*analysis.Analyzer) { // In VetxOnly mode, the analysis is run only for facts. 
if !cfg.VetxOnly { - code = processResults(fset, cfg.ID, results) + code = processResults(fset, cfg.ID, cfg.FixArchive, results) } os.Exit(code) @@ -177,7 +179,7 @@ func readConfig(filename string) (*Config, error) { return cfg, nil } -func processResults(fset *token.FileSet, id string, results []result) (exit int) { +func processResults(fset *token.FileSet, id, fixArchive string, results []result) (exit int) { if analysisflags.Fix { // Don't print the diagnostics, // but apply all fixes from the root actions. @@ -194,7 +196,40 @@ func processResults(fset *token.FileSet, id string, results []result) (exit int) Diagnostics: res.diagnostics, } } - if err := driverutil.ApplyFixes(fixActions, analysisflags.Diff, false); err != nil { + + // By default, fixes overwrite the original file. + // With the -diff flag, print the diffs to stdout. + // If "go fix" provides a fix archive, we write files + // into it so that mutations happen after the build. + write := func(filename string, content []byte) error { + return os.WriteFile(filename, content, 0644) + } + if fixArchive != "" { + f, err := os.Create(fixArchive) + if err != nil { + log.Fatalf("can't create -fix archive: %v", err) + } + zw := zip.NewWriter(f) + zw.SetComment(id) // ignore error + defer func() { + if err := zw.Close(); err != nil { + log.Fatalf("closing -fix archive zip writer: %v", err) + } + if err := f.Close(); err != nil { + log.Fatalf("closing -fix archive file: %v", err) + } + }() + write = func(filename string, content []byte) error { + f, err := zw.Create(filename) + if err != nil { + return err + } + _, err = f.Write(content) + return err + } + } + + if err := driverutil.ApplyFixes(fixActions, write, analysisflags.Diff, false); err != nil { // Fail when applying fixes failed. log.Print(err) exit = 1 diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 5bacc0fa4..adb471101 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -9,6 +9,7 @@ import ( "fmt" "go/ast" "go/token" + "reflect" "slices" "strconv" "strings" @@ -149,7 +150,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added if newImport.Name != nil { newImport.Name.NamePos = pos } - newImport.Path.ValuePos = pos + updateBasicLitPos(newImport.Path, pos) newImport.EndPos = pos // Clean up parens. impDecl contains at least one spec. @@ -184,7 +185,7 @@ func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added first.Lparen = first.Pos() // Move the imports of the other import declaration to the first one. for _, spec := range gen.Specs { - spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + updateBasicLitPos(spec.(*ast.ImportSpec).Path, first.Pos()) first.Specs = append(first.Specs, spec) } f.Decls = slices.Delete(f.Decls, i, i+1) @@ -470,3 +471,17 @@ func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { return groups } + +// updateBasicLitPos updates lit.Pos, +// ensuring that lit.End (if set) is displaced by the same amount. +// (See https://go.dev/issue/76395.)
+func updateBasicLitPos(lit *ast.BasicLit, pos token.Pos) { + len := lit.End() - lit.Pos() + lit.ValuePos = pos + // TODO(adonovan): after go1.26, simplify to: + // lit.ValueEnd = pos + len + v := reflect.ValueOf(lit).Elem().FieldByName("ValueEnd") + if v.IsValid() && v.Int() != 0 { + v.SetInt(int64(pos + len)) + } +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 060ab08ef..ff607389d 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -1027,11 +1027,15 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { - // Fill in the blanks to avoid surprises. + // To avoid surprises, fill in the blanks consistent + // with other packages. (For example, some analyzers + // assert that each needed types.Info map is non-nil + // even when there is no syntax that would cause them + // to consult the map.) lpkg.Types = types.Unsafe lpkg.Fset = ld.Fset lpkg.Syntax = []*ast.File{} - lpkg.TypesInfo = new(types.Info) + lpkg.TypesInfo = ld.newTypesInfo() lpkg.TypesSizes = ld.sizes return } @@ -1180,20 +1184,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - // Populate TypesInfo only if needed, as it - // causes the type checker to work much harder. - if ld.Config.Mode&NeedTypesInfo != 0 { - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), - FileVersions: make(map[*ast.File]string), - } - } + lpkg.TypesInfo = ld.newTypesInfo() lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1307,6 +1298,24 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { lpkg.IllTyped = illTyped } +func (ld *loader) newTypesInfo() *types.Info { + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo == 0 { + return nil + } + return &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } +} + // An importFunc is an implementation of the single-method // types.Importer interface based on a function value. type importerFunc func(path string) (*types.Package, error) diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go index 5f10f56cb..3d24a8c63 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -12,6 +12,7 @@ import ( // Callee returns the named target of a function call, if any: // a function, method, builtin, or variable. +// It returns nil for a T(x) conversion. // // Functions and methods may potentially have type parameters. 
// diff --git a/vendor/golang.org/x/tools/internal/analysis/driverutil/fix.go b/vendor/golang.org/x/tools/internal/analysis/driverutil/fix.go index 763650c74..7769b39be 100644 --- a/vendor/golang.org/x/tools/internal/analysis/driverutil/fix.go +++ b/vendor/golang.org/x/tools/internal/analysis/driverutil/fix.go @@ -93,11 +93,13 @@ type FixAction struct { // // If printDiff (from the -diff flag) is set, instead of updating the // files it display the final patch composed of all the cleanly merged -// fixes. +// fixes. (It is tempting to factor printDiff as just a variant of +// writeFile that is provided the old and new content, but it's hard +// to generate a good summary that way.) // // TODO(adonovan): handle file-system level aliases such as symbolic // links using robustio.FileID. -func ApplyFixes(actions []FixAction, printDiff, verbose bool) error { +func ApplyFixes(actions []FixAction, writeFile func(filename string, content []byte) error, printDiff, verbose bool) error { generated := make(map[*token.File]bool) // Select fixes to apply. @@ -160,7 +162,9 @@ func ApplyFixes(actions []FixAction, printDiff, verbose bool) error { var ( accumulatedEdits = make(map[string][]diff.Edit) filePkgs = make(map[string]*types.Package) // maps each file to an arbitrary package that includes it - goodFixes = 0 + + goodFixes = 0 // number of fixes cleanly applied + skippedFixes = 0 // number of fixes skipped (because e.g. edits a generated file) ) fixloop: for _, fixact := range fixes { @@ -168,6 +172,7 @@ fixloop: for _, edit := range fixact.fix.TextEdits { file := fixact.act.FileSet.File(edit.Pos) if generated[file] { + skippedFixes++ continue fixloop } } @@ -227,7 +232,7 @@ fixloop: log.Printf("%s: fix %s applied", fixact.act.Name, fixact.fix.Message) } } - badFixes := len(fixes) - goodFixes + badFixes := len(fixes) - goodFixes - skippedFixes // number of fixes that could not be applied // Show diff or update files to final state. var files []string @@ -261,12 +266,11 @@ fixloop: os.Stdout.WriteString(unified) } else { - // write + // write file totalFiles++ - // TODO(adonovan): abstract the I/O. - if err := os.WriteFile(file, final, 0644); err != nil { + if err := writeFile(file, final); err != nil { log.Println(err) - continue + continue // (causes ApplyFix to return an error) } filesUpdated++ } @@ -305,15 +309,25 @@ fixloop: // TODO(adonovan): should we log that n files were updated in case of total victory? if badFixes > 0 || filesUpdated < totalFiles { if printDiff { - return fmt.Errorf("%d of %d fixes skipped (e.g. due to conflicts)", badFixes, len(fixes)) + return fmt.Errorf("%d of %s skipped (e.g. due to conflicts)", + badFixes, + plural(len(fixes), "fix", "fixes")) } else { - return fmt.Errorf("applied %d of %d fixes; %d files updated. (Re-run the command to apply more.)", - goodFixes, len(fixes), filesUpdated) + return fmt.Errorf("applied %d of %s; %s updated. 
(Re-run the command to apply more.)",
+			goodFixes,
+			plural(len(fixes), "fix", "fixes"),
+			plural(filesUpdated, "file", "files"))
 		}
 	}
 
 	if verbose {
-		log.Printf("applied %d fixes, updated %d files", len(fixes), filesUpdated)
+		if skippedFixes > 0 {
+			log.Printf("skipped %s that would edit generated files",
+				plural(skippedFixes, "fix", "fixes"))
+		}
+		log.Printf("applied %s, updated %s",
+			plural(len(fixes), "fix", "fixes"),
+			plural(filesUpdated, "file", "files"))
 	}
 
 	return nil
@@ -326,6 +340,9 @@ fixloop:
 // information for the fixed file and thus cannot accurately tell
 // whether k is among the free names of T{k: 0}, which requires
 // knowledge of whether T is a struct type.
+//
+// Like [imports.Process] (the core of x/tools/cmd/goimports), it also
+// merges import decls.
 func FormatSourceRemoveImports(pkg *types.Package, src []byte) ([]byte, error) {
 	// This function was reduced from the "strict entire file"
 	// path through [format.Source].
@@ -340,6 +357,10 @@ func FormatSourceRemoveImports(fset *token.FileSet, pkg *types.Package, file *ast.File) ([]byte, error) {
 	removeUnneededImports(fset, pkg, file)
 
+	// TODO(adonovan): to generate cleaner edits when adding an import,
+	// consider adding a call to imports.mergeImports; however, it does
+	// cause comments to migrate.
+
 	// printerNormalizeNumbers means to canonicalize number literal prefixes
 	// and exponents while printing. See https://golang.org/doc/go1.13#gofmt.
 	//
@@ -425,3 +446,12 @@ func removeUnneededImports(fset *token.FileSet, pkg *types.Package, file *ast.Fi
 		del()
 	}
 }
+
+// plural returns "n nouns", selecting the plural form as appropriate.
+func plural(n int, singular, plural string) string {
+	if n == 1 {
+		return "1 " + singular
+	} else {
+		return fmt.Sprintf("%d %s", n, plural)
+	}
+}
diff --git a/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go b/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go
index 7fc42a5ef..545884685 100644
--- a/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go
+++ b/vendor/golang.org/x/tools/internal/analysis/driverutil/print.go
@@ -7,6 +7,7 @@ package driverutil
 
 // This file defined output helpers common to all drivers.
 
 import (
+	"cmp"
 	"encoding/json"
 	"fmt"
 	"go/token"
@@ -76,11 +77,10 @@ type JSONSuggestedFix struct {
 }
 
 // A JSONDiagnostic describes the JSON schema of an analysis.Diagnostic.
-//
-// TODO(matloob): include End position if present.
 type JSONDiagnostic struct {
 	Category       string                   `json:"category,omitempty"`
 	Posn           string                   `json:"posn"` // e.g. "file.go:line:column"
+	End            string                   `json:"end"`  // (ditto)
 	Message        string                   `json:"message"`
 	SuggestedFixes []JSONSuggestedFix       `json:"suggested_fixes,omitempty"`
 	Related        []JSONRelatedInformation `json:"related,omitempty"`
 }
 
@@ -88,10 +88,9 @@
 // A JSONRelated describes a secondary position and message related to
 // a primary diagnostic.
-//
-// TODO(adonovan): include End position if present.
 type JSONRelatedInformation struct {
 	Posn    string `json:"posn"` // e.g. "file.go:line:column"
+	End     string `json:"end"`  // (ditto)
 	Message string `json:"message"`
 }
 
@@ -127,12 +126,14 @@ func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.
for _, r := range f.Related { related = append(related, JSONRelatedInformation{ Posn: fset.Position(r.Pos).String(), + End: fset.Position(cmp.Or(r.End, r.Pos)).String(), Message: r.Message, }) } jdiag := JSONDiagnostic{ Category: f.Category, Posn: fset.Position(f.Pos).String(), + End: fset.Position(cmp.Or(f.End, f.Pos)).String(), Message: f.Message, SuggestedFixes: fixes, Related: related, diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/vendor/golang.org/x/tools/internal/diff/lcs/old.go index 7b7c5cc67..5acc68e1d 100644 --- a/vendor/golang.org/x/tools/internal/diff/lcs/old.go +++ b/vendor/golang.org/x/tools/internal/diff/lcs/old.go @@ -16,10 +16,6 @@ type Diff struct { ReplStart, ReplEnd int // offset of replacement text in B } -// DiffStrings returns the differences between two strings. -// It does not respect rune boundaries. -func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) } - // DiffBytes returns the differences between two byte sequences. // It does not respect rune boundaries. func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) } diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go index 05f3a9a57..16ae6bb02 100644 --- a/vendor/golang.org/x/tools/internal/event/core/export.go +++ b/vendor/golang.org/x/tools/internal/event/core/export.go @@ -8,7 +8,6 @@ import ( "context" "sync/atomic" "time" - "unsafe" "golang.org/x/tools/internal/event/label" ) @@ -17,23 +16,21 @@ import ( // It may return a modified context and event. type Exporter func(context.Context, Event, label.Map) context.Context -var ( - exporter unsafe.Pointer -) +var exporter atomic.Pointer[Exporter] // SetExporter sets the global exporter function that handles all events. // The exporter is called synchronously from the event call site, so it should // return quickly so as not to hold up user code. func SetExporter(e Exporter) { - p := unsafe.Pointer(&e) if e == nil { // &e is always valid, and so p is always valid, but for the early abort // of ProcessEvent to be efficient it needs to make the nil check on the // pointer without having to dereference it, so we make the nil function // also a nil pointer - p = nil + exporter.Store(nil) + } else { + exporter.Store(&e) } - atomic.StorePointer(&exporter, p) } // deliver is called to deliver an event to the supplied exporter. @@ -48,7 +45,7 @@ func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context { // Export is called to deliver an event to the global exporter if set. func Export(ctx context.Context, ev Event) context.Context { // get the global exporter and abort early if there is not one - exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + exporterPtr := exporter.Load() if exporterPtr == nil { return ctx } @@ -61,7 +58,7 @@ func Export(ctx context.Context, ev Event) context.Context { // It will fill in the time. 
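Aside: the export.go rewrite above swaps a hand-rolled unsafe.Pointer plus atomic.LoadPointer/StorePointer scheme for the generic sync/atomic.Pointer (available since Go 1.19), while preserving the cheap nil check that lets the early-abort path stay a plain pointer comparison. A minimal sketch of the same pattern, with invented names (handler, setHandler, emit) rather than the patch's own:

package main

import (
	"fmt"
	"sync/atomic"
)

type handler func(msg string)

// current holds a pointer to the registered handler, or nil if none.
// atomic.Pointer[T] gives type-safe Load/Store with no unsafe casts.
var current atomic.Pointer[handler]

// setHandler atomically installs h; a nil h stores a nil pointer so
// that the fast path in emit remains a simple nil comparison.
func setHandler(h handler) {
	if h == nil {
		current.Store(nil)
	} else {
		current.Store(&h)
	}
}

// emit delivers msg to the registered handler, if any.
func emit(msg string) {
	p := current.Load()
	if p == nil {
		return // early abort: no handler registered
	}
	(*p)(msg)
}

func main() {
	emit("dropped") // no handler installed yet
	setHandler(func(msg string) { fmt.Println("got:", msg) })
	emit("hello")
}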
func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) { // get the global exporter and abort early if there is not one - exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + exporterPtr := exporter.Load() if exporterPtr == nil { return ctx, func() {} } diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go index 92a391057..c37584af9 100644 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -7,7 +7,6 @@ package label import ( "fmt" "io" - "reflect" "slices" "unsafe" ) @@ -103,11 +102,10 @@ type stringptr unsafe.Pointer // This method is for implementing new key types, label creation should // normally be done with the Of method of the key. func OfString(k Key, v string) Label { - hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) return Label{ key: k, - packed: uint64(hdr.Len), - untyped: stringptr(hdr.Data), + packed: uint64(len(v)), + untyped: stringptr(unsafe.StringData(v)), } } @@ -116,11 +114,7 @@ func OfString(k Key, v string) Label { // This method is for implementing new key types, for type safety normal // access should be done with the From method of the key. func (t Label) UnpackString() string { - var v string - hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) - hdr.Data = uintptr(t.untyped.(stringptr)) - hdr.Len = int(t.packed) - return v + return unsafe.String((*byte)(t.untyped.(stringptr)), int(t.packed)) } // Valid returns true if the Label is a valid one (it has a key). diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go index 581784da4..f7b9c1286 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/deps.go +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -12,360 +12,364 @@ type pkginfo struct { } var deps = [...]pkginfo{ - {"archive/tar", "\x03n\x03E<\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, - {"archive/zip", "\x02\x04d\a\x03\x12\x021<\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, - {"bufio", "\x03n\x84\x01D\x14"}, - {"bytes", "q*Z\x03\fG\x02\x02"}, + {"archive/tar", "\x03p\x03F=\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, + {"archive/zip", "\x02\x04f\a\x03\x13\x021=\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, + {"bufio", "\x03p\x86\x01D\x14"}, + {"bytes", "s+[\x03\fG\x02\x02"}, {"cmp", ""}, - {"compress/bzip2", "\x02\x02\xf1\x01A"}, - {"compress/flate", "\x02o\x03\x81\x01\f\x033\x01\x03"}, - {"compress/gzip", "\x02\x04d\a\x03\x14mT"}, - {"compress/lzw", "\x02o\x03\x81\x01"}, - {"compress/zlib", "\x02\x04d\a\x03\x12\x01n"}, - {"container/heap", "\xb7\x02"}, + {"compress/bzip2", "\x02\x02\xf5\x01A"}, + {"compress/flate", "\x02q\x03\x83\x01\f\x033\x01\x03"}, + {"compress/gzip", "\x02\x04f\a\x03\x15nT"}, + {"compress/lzw", "\x02q\x03\x83\x01"}, + {"compress/zlib", "\x02\x04f\a\x03\x13\x01o"}, + {"container/heap", "\xbb\x02"}, {"container/list", ""}, {"container/ring", ""}, - {"context", "q[o\x01\r"}, - {"crypto", "\x86\x01oC"}, - {"crypto/aes", "\x10\n\t\x95\x02"}, - {"crypto/cipher", "\x03 \x01\x01\x1f\x11\x1c+Y"}, - {"crypto/des", "\x10\x15\x1f-+\x9c\x01\x03"}, - {"crypto/dsa", "D\x04)\x84\x01\r"}, - {"crypto/ecdh", "\x03\v\f\x10\x04\x16\x04\r\x1c\x84\x01"}, - {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\a\v\x06\x01\x04\f\x01\x1c\x84\x01\r\x05K\x01"}, - {"crypto/ed25519", "\x0e\x1e\x11\a\n\a\x1c\x84\x01C"}, - {"crypto/elliptic", "2?\x84\x01\r9"}, + {"context", "s\\p\x01\r"}, + {"crypto", "\x89\x01pC"}, + {"crypto/aes", 
"\x10\n\t\x99\x02"}, + {"crypto/cipher", "\x03 \x01\x01 \x12\x1c,Z"}, + {"crypto/des", "\x10\x15 .,\x9d\x01\x03"}, + {"crypto/dsa", "E\x04*\x86\x01\r"}, + {"crypto/ecdh", "\x03\v\f\x10\x04\x17\x04\x0e\x1c\x86\x01"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x10\b\v\x06\x01\x04\r\x01\x1c\x86\x01\r\x05K\x01"}, + {"crypto/ed25519", "\x0e\x1e\x12\a\v\a\x1c\x86\x01C"}, + {"crypto/elliptic", "3@\x86\x01\r9"}, {"crypto/fips140", "\"\x05"}, - {"crypto/hkdf", "/\x14\x01-\x15"}, - {"crypto/hmac", "\x1a\x16\x13\x01\x111"}, - {"crypto/internal/boring", "\x0e\x02\ri"}, - {"crypto/internal/boring/bbig", "\x1a\xe8\x01M"}, - {"crypto/internal/boring/bcache", "\xbc\x02\x13"}, + {"crypto/hkdf", "/\x15\x01.\x16"}, + {"crypto/hmac", "\x1a\x16\x14\x01\x122"}, + {"crypto/internal/boring", "\x0e\x02\rl"}, + {"crypto/internal/boring/bbig", "\x1a\xec\x01M"}, + {"crypto/internal/boring/bcache", "\xc0\x02\x13"}, {"crypto/internal/boring/sig", ""}, {"crypto/internal/constanttime", ""}, - {"crypto/internal/cryptotest", "\x03\r\n\b%\x0e\x19\x06\x12\x12 \x04\x06\t\x18\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, - {"crypto/internal/entropy", "I"}, - {"crypto/internal/entropy/v1.0.0", "B/\x93\x018\x13"}, - {"crypto/internal/fips140", "A0\xbd\x01\v\x16"}, - {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x13\x05\x01\x01\x06*\x93\x014"}, - {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x11\x05\x01\a*\x90\x01"}, - {"crypto/internal/fips140/alias", "\xcf\x02"}, - {"crypto/internal/fips140/bigmod", "'\x18\x01\a*\x93\x01"}, - {"crypto/internal/fips140/check", "\"\x0e\x06\t\x02\xb4\x01Z"}, - {"crypto/internal/fips140/check/checktest", "'\x87\x02!"}, - {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x13\x05\t\x01(\x84\x01\x0f7\x01"}, - {"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\t\r2\x84\x01\x0f7"}, - {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x02\x069\x15oF"}, - {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\v9\xc7\x01\x03"}, - {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x112\x93\x017"}, - {"crypto/internal/fips140/edwards25519/field", "'\x13\x052\x93\x01"}, - {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\x06;\x15"}, - {"crypto/internal/fips140/hmac", "\x03\x1f\x14\x01\x019\x15"}, - {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0e\x03\x052\xca\x01"}, - {"crypto/internal/fips140/nistec", "\x1e\t\f\f2\x93\x01*\r\x14"}, - {"crypto/internal/fips140/nistec/fiat", "'\x137\x93\x01"}, - {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\x06;\x15"}, - {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\r\x01\x01\x027\x15oF"}, - {"crypto/internal/fips140/sha256", "\x03\x1f\x1d\x01\a*\x15~"}, - {"crypto/internal/fips140/sha3", "\x03\x1f\x18\x05\x011\x93\x01K"}, - {"crypto/internal/fips140/sha512", "\x03\x1f\x1d\x01\a*\x15~"}, - {"crypto/internal/fips140/ssh", "'_"}, - {"crypto/internal/fips140/subtle", "\x1e\a\x1a\xc5\x01"}, - {"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\x06\x029\x15"}, - {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\a\t2\x15"}, - {"crypto/internal/fips140cache", "\xae\x02\r&"}, + {"crypto/internal/cryptotest", "\x03\r\n\b&\x0f\x19\x06\x13\x12 \x04\x06\t\x19\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, + {"crypto/internal/entropy", "J"}, + {"crypto/internal/entropy/v1.0.0", "C0\x95\x018\x13"}, + {"crypto/internal/fips140", "B1\xbf\x01\v\x16"}, + {"crypto/internal/fips140/aes", "\x03\x1f\x03\x02\x14\x05\x01\x01\x06+\x95\x014"}, + {"crypto/internal/fips140/aes/gcm", "\"\x01\x02\x02\x02\x12\x05\x01\a+\x92\x01"}, + {"crypto/internal/fips140/alias", 
"\xd3\x02"}, + {"crypto/internal/fips140/bigmod", "'\x19\x01\a+\x95\x01"}, + {"crypto/internal/fips140/check", "\"\x0e\a\t\x02\xb7\x01Z"}, + {"crypto/internal/fips140/check/checktest", "'\x8b\x02!"}, + {"crypto/internal/fips140/drbg", "\x03\x1e\x01\x01\x04\x14\x05\t\x01)\x86\x01\x0f7\x01"}, + {"crypto/internal/fips140/ecdh", "\x03\x1f\x05\x02\n\r3\x86\x01\x0f7"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1f\x04\x01\x02\a\x03\x06:\x16pF"}, + {"crypto/internal/fips140/ed25519", "\x03\x1f\x05\x02\x04\f:\xc9\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "\x1e\t\a\x123\x95\x017"}, + {"crypto/internal/fips140/edwards25519/field", "'\x14\x053\x95\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1f\x05\t\a<\x16"}, + {"crypto/internal/fips140/hmac", "\x03\x1f\x15\x01\x01:\x16"}, + {"crypto/internal/fips140/mldsa", "\x03\x1b\x04\x05\x02\x0e\x01\x03\x053\x95\x017"}, + {"crypto/internal/fips140/mlkem", "\x03\x1f\x05\x02\x0f\x03\x053\xcc\x01"}, + {"crypto/internal/fips140/nistec", "\x1e\t\r\f3\x95\x01*\r\x14"}, + {"crypto/internal/fips140/nistec/fiat", "'\x148\x95\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1f\x05\t\a<\x16"}, + {"crypto/internal/fips140/rsa", "\x03\x1b\x04\x04\x01\x02\x0e\x01\x01\x028\x16pF"}, + {"crypto/internal/fips140/sha256", "\x03\x1f\x1e\x01\a+\x16\x7f"}, + {"crypto/internal/fips140/sha3", "\x03\x1f\x19\x05\x012\x95\x01K"}, + {"crypto/internal/fips140/sha512", "\x03\x1f\x1e\x01\a+\x16\x7f"}, + {"crypto/internal/fips140/ssh", "'b"}, + {"crypto/internal/fips140/subtle", "\x1e\a\x1b\xc8\x01"}, + {"crypto/internal/fips140/tls12", "\x03\x1f\x05\t\a\x02:\x16"}, + {"crypto/internal/fips140/tls13", "\x03\x1f\x05\b\b\t3\x16"}, + {"crypto/internal/fips140cache", "\xb2\x02\r&"}, {"crypto/internal/fips140deps", ""}, - {"crypto/internal/fips140deps/byteorder", "\x9c\x01"}, - {"crypto/internal/fips140deps/cpu", "\xb1\x01\a"}, - {"crypto/internal/fips140deps/godebug", "\xb9\x01"}, - {"crypto/internal/fips140deps/time", "\xc9\x02"}, - {"crypto/internal/fips140hash", "7\x1c3\xc9\x01"}, - {"crypto/internal/fips140only", ")\r\x01\x01N3<"}, + {"crypto/internal/fips140deps/byteorder", "\x9f\x01"}, + {"crypto/internal/fips140deps/cpu", "\xb4\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xbc\x01"}, + {"crypto/internal/fips140deps/time", "\xcd\x02"}, + {"crypto/internal/fips140hash", "8\x1d4\xca\x01"}, + {"crypto/internal/fips140only", ")\x0e\x01\x01P3="}, {"crypto/internal/fips140test", ""}, - {"crypto/internal/hpke", "\x0e\x01\x01\x03\x056#+hM"}, - {"crypto/internal/impl", "\xb9\x02"}, - {"crypto/internal/randutil", "\xf5\x01\x12"}, - {"crypto/internal/sysrand", "qo! 
\r\r\x01\x01\f\x06"}, - {"crypto/internal/sysrand/internal/seccomp", "q"}, - {"crypto/md5", "\x0e6-\x15\x16h"}, - {"crypto/mlkem", "1"}, - {"crypto/pbkdf2", "4\x0f\x01-\x15"}, - {"crypto/rand", "\x1a\b\a\x1b\x04\x01(\x84\x01\rM"}, - {"crypto/rc4", "%\x1f-\xc7\x01"}, - {"crypto/rsa", "\x0e\f\x01\v\x0f\x0e\x01\x04\x06\a\x1c\x03\x123<\f\x01"}, - {"crypto/sha1", "\x0e\f*\x03*\x15\x16\x15S"}, - {"crypto/sha256", "\x0e\f\x1cP"}, - {"crypto/sha3", "\x0e)O\xc9\x01"}, - {"crypto/sha512", "\x0e\f\x1eN"}, - {"crypto/subtle", "\x1e\x1c\x9c\x01X"}, - {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\r\n\x01\n\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b<\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, - {"crypto/tls/internal/fips140tls", "\x17\xa5\x02"}, - {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x015\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x039\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, - {"crypto/x509/pkix", "g\x06\a\x8e\x01G"}, - {"database/sql", "\x03\nN\x16\x03\x81\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, - {"database/sql/driver", "\rd\x03\xb5\x01\x0f\x11"}, - {"debug/buildinfo", "\x03[\x02\x01\x01\b\a\x03e\x1a\x02\x01+\x0f\x1f"}, - {"debug/dwarf", "\x03g\a\x03\x81\x011\x11\x01\x01"}, - {"debug/elf", "\x03\x06T\r\a\x03e\x1b\x01\f \x17\x01\x16"}, - {"debug/gosym", "\x03g\n\xc3\x01\x01\x01\x02"}, - {"debug/macho", "\x03\x06T\r\ne\x1c,\x17\x01"}, - {"debug/pe", "\x03\x06T\r\a\x03e\x1c,\x17\x01\x16"}, - {"debug/plan9obj", "j\a\x03e\x1c,"}, - {"embed", "q*A\x19\x01S"}, + {"crypto/internal/hpke", "\x03\v\x01\x01\x03\x055\x03\x04\x01\x01\x16\a\x03\x13\xcc\x01"}, + {"crypto/internal/impl", "\xbd\x02"}, + {"crypto/internal/randutil", "\xf9\x01\x12"}, + {"crypto/internal/sysrand", "sq! 
\r\r\x01\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "s"}, + {"crypto/md5", "\x0e7.\x16\x16i"}, + {"crypto/mlkem", "\x0e$"}, + {"crypto/mlkem/mlkemtest", "2\x1b&"}, + {"crypto/pbkdf2", "5\x0f\x01.\x16"}, + {"crypto/rand", "\x1a\b\a\x1c\x04\x01)\x86\x01\rM"}, + {"crypto/rc4", "% .\xc9\x01"}, + {"crypto/rsa", "\x0e\f\x01\v\x10\x0e\x01\x04\a\a\x1c\x03\x133=\f\x01"}, + {"crypto/sha1", "\x0e\f+\x03+\x16\x16\x15T"}, + {"crypto/sha256", "\x0e\f\x1dR"}, + {"crypto/sha3", "\x0e*Q\xca\x01"}, + {"crypto/sha512", "\x0e\f\x1fP"}, + {"crypto/subtle", "\x1e\x1d\x9f\x01X"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\t\x01\x0e\n\x01\n\x05\x04\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x15\b=\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, + {"crypto/tls/internal/fips140tls", "\x17\xa9\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x016\x06\x01\x01\x02\x05\x0e\x06\x02\x02\x03F\x03:\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\a\b\x02\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/pkix", "i\x06\a\x90\x01G"}, + {"database/sql", "\x03\nP\x16\x03\x83\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, + {"database/sql/driver", "\rf\x03\xb7\x01\x0f\x11"}, + {"debug/buildinfo", "\x03]\x02\x01\x01\b\a\x03g\x1a\x02\x01+\x0f\x1f"}, + {"debug/dwarf", "\x03i\a\x03\x83\x011\x11\x01\x01"}, + {"debug/elf", "\x03\x06V\r\a\x03g\x1b\x01\f \x17\x01\x16"}, + {"debug/gosym", "\x03i\n\xc5\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06V\r\ng\x1c,\x17\x01"}, + {"debug/pe", "\x03\x06V\r\a\x03g\x1c,\x17\x01\x16"}, + {"debug/plan9obj", "l\a\x03g\x1c,"}, + {"embed", "s+B\x19\x01S"}, {"embed/internal/embedtest", ""}, {"encoding", ""}, - {"encoding/ascii85", "\xf5\x01C"}, - {"encoding/asn1", "\x03n\x03e(\x01'\r\x02\x01\x10\x03\x01"}, - {"encoding/base32", "\xf5\x01A\x02"}, - {"encoding/base64", "\x9c\x01YA\x02"}, - {"encoding/binary", "q\x84\x01\f(\r\x05"}, - {"encoding/csv", "\x02\x01n\x03\x81\x01D\x12\x02"}, - {"encoding/gob", "\x02c\x05\a\x03e\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, - {"encoding/hex", "q\x03\x81\x01A\x03"}, - {"encoding/json", "\x03\x01a\x04\b\x03\x81\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, - {"encoding/pem", "\x03f\b\x84\x01A\x03"}, - {"encoding/xml", "\x02\x01b\f\x03\x81\x014\x05\n\x01\x02\x10\x02"}, - {"errors", "\xcc\x01\x83\x01"}, - {"expvar", "nK@\b\v\x15\r\b\x02\x03\x01\x11"}, - {"flag", "e\f\x03\x81\x01,\b\x05\b\x02\x01\x10"}, - {"fmt", "qE&\x19\f \b\r\x02\x03\x12"}, - {"go/ast", "\x03\x01p\x0e\x01r\x03)\b\r\x02\x01\x12\x02"}, - {"go/build", "\x02\x01n\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\b\x1b\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, - {"go/build/constraint", "q\xc7\x01\x01\x12\x02"}, - {"go/constant", "t\x0f~\x01\x024\x01\x02\x12"}, - {"go/doc", "\x04p\x01\x05\t=51\x10\x02\x01\x12\x02"}, - {"go/doc/comment", "\x03q\xc2\x01\x01\x01\x01\x12\x02"}, - {"go/format", "\x03q\x01\v\x01\x02rD"}, - {"go/importer", "v\a\x01\x01\x04\x01q9"}, - {"go/internal/gccgoimporter", "\x02\x01[\x13\x03\x04\v\x01o\x02,\x01\x05\x11\x01\f\b"}, - {"go/internal/gcimporter", "\x02r\x0f\x010\x05\r/,\x15\x03\x02"}, - {"go/internal/srcimporter", "t\x01\x01\n\x03\x01q,\x01\x05\x12\x02\x14"}, - {"go/parser", "\x03n\x03\x01\x02\v\x01r\x01+\x06\x12"}, - {"go/printer", "t\x01\x02\x03\tr\f \x15\x02\x01\x02\v\x05\x02"}, - {"go/scanner", "\x03q\x0fr2\x10\x01\x13\x02"}, - {"go/token", "\x04p\x84\x01>\x02\x03\x01\x0f\x02"}, - {"go/types", "\x03\x01\x06g\x03\x01\x03\b\x03\x024\x062\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, - {"go/version", "\xbe\x01{"}, - 
{"hash", "\xf5\x01"}, - {"hash/adler32", "q\x15\x16"}, - {"hash/crc32", "q\x15\x16\x15\x8a\x01\x01\x13"}, - {"hash/crc64", "q\x15\x16\x9f\x01"}, - {"hash/fnv", "q\x15\x16h"}, - {"hash/maphash", "\x86\x01\x11<|"}, - {"html", "\xb9\x02\x02\x12"}, - {"html/template", "\x03k\x06\x18-<\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, - {"image", "\x02o\x1ef\x0f4\x03\x01"}, + {"encoding/ascii85", "\xf9\x01C"}, + {"encoding/asn1", "\x03p\x03g(\x01'\r\x02\x01\x10\x03\x01"}, + {"encoding/base32", "\xf9\x01A\x02"}, + {"encoding/base64", "\x9f\x01ZA\x02"}, + {"encoding/binary", "s\x86\x01\f(\r\x05"}, + {"encoding/csv", "\x02\x01p\x03\x83\x01D\x12\x02"}, + {"encoding/gob", "\x02e\x05\a\x03g\x1c\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, + {"encoding/hex", "s\x03\x83\x01A\x03"}, + {"encoding/json", "\x03\x01c\x04\b\x03\x83\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, + {"encoding/pem", "\x03h\b\x86\x01A\x03"}, + {"encoding/xml", "\x02\x01d\f\x03\x83\x014\x05\n\x01\x02\x10\x02"}, + {"errors", "\xcf\x01\x84\x01"}, + {"expvar", "pLA\b\v\x15\r\b\x02\x03\x01\x11"}, + {"flag", "g\f\x03\x83\x01,\b\x05\b\x02\x01\x10"}, + {"fmt", "sF'\x19\f \b\r\x02\x03\x12"}, + {"go/ast", "\x03\x01r\x0f\x01s\x03)\b\r\x02\x01\x12\x02"}, + {"go/build", "\x02\x01p\x03\x01\x02\x02\b\x02\x01\x17\x1f\x04\x02\b\x1c\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, + {"go/build/constraint", "s\xc9\x01\x01\x12\x02"}, + {"go/constant", "v\x10\x7f\x01\x024\x01\x02\x12"}, + {"go/doc", "\x04r\x01\x05\n=61\x10\x02\x01\x12\x02"}, + {"go/doc/comment", "\x03s\xc4\x01\x01\x01\x01\x12\x02"}, + {"go/format", "\x03s\x01\f\x01\x02sD"}, + {"go/importer", "x\a\x01\x02\x04\x01r9"}, + {"go/internal/gccgoimporter", "\x02\x01]\x13\x03\x04\f\x01p\x02,\x01\x05\x11\x01\f\b"}, + {"go/internal/gcimporter", "\x02t\x10\x010\x05\r0,\x15\x03\x02"}, + {"go/internal/scannerhooks", "\x86\x01"}, + {"go/internal/srcimporter", "v\x01\x01\v\x03\x01r,\x01\x05\x12\x02\x14"}, + {"go/parser", "\x03p\x03\x01\x02\b\x04\x01s\x01+\x06\x12"}, + {"go/printer", "v\x01\x02\x03\ns\f \x15\x02\x01\x02\v\x05\x02"}, + {"go/scanner", "\x03s\v\x05s2\x10\x01\x13\x02"}, + {"go/token", "\x04r\x86\x01>\x02\x03\x01\x0f\x02"}, + {"go/types", "\x03\x01\x06i\x03\x01\x03\t\x03\x024\x063\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, + {"go/version", "\xc1\x01|"}, + {"hash", "\xf9\x01"}, + {"hash/adler32", "s\x16\x16"}, + {"hash/crc32", "s\x16\x16\x15\x8b\x01\x01\x13"}, + {"hash/crc64", "s\x16\x16\xa0\x01"}, + {"hash/fnv", "s\x16\x16i"}, + {"hash/maphash", "\x89\x01\x11<}"}, + {"html", "\xbd\x02\x02\x12"}, + {"html/template", "\x03m\x06\x19-=\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, + {"image", "\x02q\x1fg\x0f4\x03\x01"}, {"image/color", ""}, - {"image/color/palette", "\x8f\x01"}, - {"image/draw", "\x8e\x01\x01\x04"}, - {"image/gif", "\x02\x01\x05i\x03\x1a\x01\x01\x01\vY"}, - {"image/internal/imageutil", "\x8e\x01"}, - {"image/jpeg", "\x02o\x1d\x01\x04b"}, - {"image/png", "\x02\aa\n\x12\x02\x06\x01fC"}, - {"index/suffixarray", "\x03g\a\x84\x01\f+\n\x01"}, - {"internal/abi", "\xb8\x01\x97\x01"}, - {"internal/asan", "\xcf\x02"}, - {"internal/bisect", "\xae\x02\r\x01"}, - {"internal/buildcfg", "tGf\x06\x02\x05\n\x01"}, - {"internal/bytealg", "\xb1\x01\x9e\x01"}, + {"image/color/palette", "\x92\x01"}, + {"image/draw", "\x91\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05k\x03\x1b\x01\x01\x01\vZ\x0f"}, + {"image/internal/imageutil", "\x91\x01"}, + {"image/jpeg", "\x02q\x1e\x01\x04c"}, + {"image/png", "\x02\ac\n\x13\x02\x06\x01gC"}, + {"index/suffixarray", "\x03i\a\x86\x01\f+\n\x01"}, + {"internal/abi", 
"\xbb\x01\x98\x01"}, + {"internal/asan", "\xd3\x02"}, + {"internal/bisect", "\xb2\x02\r\x01"}, + {"internal/buildcfg", "vHg\x06\x02\x05\n\x01"}, + {"internal/bytealg", "\xb4\x01\x9f\x01"}, {"internal/byteorder", ""}, {"internal/cfg", ""}, - {"internal/cgrouptest", "tZS\x06\x0f\x02\x01\x04\x01"}, - {"internal/chacha8rand", "\x9c\x01\x15\a\x97\x01"}, + {"internal/cgrouptest", "v[T\x06\x0f\x02\x01\x04\x01"}, + {"internal/chacha8rand", "\x9f\x01\x15\a\x98\x01"}, {"internal/copyright", ""}, {"internal/coverage", ""}, {"internal/coverage/calloc", ""}, - {"internal/coverage/cfile", "n\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02&,\x06\a\n\x01\x03\r\x06"}, - {"internal/coverage/cformat", "\x04p-\x04P\v6\x01\x02\r"}, - {"internal/coverage/cmerge", "t-`"}, - {"internal/coverage/decodecounter", "j\n-\v\x02G,\x17\x17"}, - {"internal/coverage/decodemeta", "\x02h\n\x16\x17\v\x02G,"}, - {"internal/coverage/encodecounter", "\x02h\n-\f\x01\x02E\v!\x15"}, - {"internal/coverage/encodemeta", "\x02\x01g\n\x12\x04\x17\r\x02E,."}, - {"internal/coverage/pods", "\x04p-\x80\x01\x06\x05\n\x02\x01"}, - {"internal/coverage/rtcov", "\xcf\x02"}, - {"internal/coverage/slicereader", "j\n\x81\x01Z"}, - {"internal/coverage/slicewriter", "t\x81\x01"}, - {"internal/coverage/stringtab", "t8\x04E"}, + {"internal/coverage/cfile", "p\x06\x17\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01\"\x02',\x06\a\n\x01\x03\r\x06"}, + {"internal/coverage/cformat", "\x04r.\x04Q\v6\x01\x02\r"}, + {"internal/coverage/cmerge", "v.a"}, + {"internal/coverage/decodecounter", "l\n.\v\x02H,\x17\x17"}, + {"internal/coverage/decodemeta", "\x02j\n\x17\x17\v\x02H,"}, + {"internal/coverage/encodecounter", "\x02j\n.\f\x01\x02F\v!\x15"}, + {"internal/coverage/encodemeta", "\x02\x01i\n\x13\x04\x17\r\x02F,."}, + {"internal/coverage/pods", "\x04r.\x81\x01\x06\x05\n\x02\x01"}, + {"internal/coverage/rtcov", "\xd3\x02"}, + {"internal/coverage/slicereader", "l\n\x83\x01Z"}, + {"internal/coverage/slicewriter", "v\x83\x01"}, + {"internal/coverage/stringtab", "v9\x04F"}, {"internal/coverage/test", ""}, {"internal/coverage/uleb128", ""}, - {"internal/cpu", "\xcf\x02"}, - {"internal/dag", "\x04p\xc2\x01\x03"}, - {"internal/diff", "\x03q\xc3\x01\x02"}, - {"internal/exportdata", "\x02\x01n\x03\x02c\x1c,\x01\x05\x11\x01\x02"}, - {"internal/filepathlite", "q*A\x1a@"}, - {"internal/fmtsort", "\x04\xa5\x02\r"}, - {"internal/fuzz", "\x03\nE\x18\x04\x03\x03\x01\v\x036<\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, + {"internal/cpu", "\xd3\x02"}, + {"internal/dag", "\x04r\xc4\x01\x03"}, + {"internal/diff", "\x03s\xc5\x01\x02"}, + {"internal/exportdata", "\x02\x01p\x03\x02e\x1c,\x01\x05\x11\x01\x02"}, + {"internal/filepathlite", "s+B\x1a@"}, + {"internal/fmtsort", "\x04\xa9\x02\r"}, + {"internal/fuzz", "\x03\nG\x18\x04\x03\x03\x01\f\x036=\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, {"internal/goarch", ""}, - {"internal/godebug", "\x99\x01!\x81\x01\x01\x13"}, + {"internal/godebug", "\x9c\x01!\x82\x01\x01\x13"}, {"internal/godebugs", ""}, {"internal/goexperiment", ""}, {"internal/goos", ""}, - {"internal/goroot", "\xa1\x02\x01\x05\x12\x02"}, + {"internal/goroot", "\xa5\x02\x01\x05\x12\x02"}, {"internal/gover", "\x04"}, {"internal/goversion", ""}, - {"internal/lazyregexp", "\xa1\x02\v\r\x02"}, - {"internal/lazytemplate", "\xf5\x01,\x18\x02\f"}, - {"internal/msan", "\xcf\x02"}, + {"internal/lazyregexp", "\xa5\x02\v\r\x02"}, + {"internal/lazytemplate", "\xf9\x01,\x18\x02\f"}, + {"internal/msan", "\xd3\x02"}, {"internal/nettrace", ""}, - 
{"internal/obscuretestdata", "i\x8c\x01,"}, - {"internal/oserror", "q"}, - {"internal/pkgbits", "\x03O\x18\a\x03\x04\vr\r\x1f\r\n\x01"}, + {"internal/obscuretestdata", "k\x8e\x01,"}, + {"internal/oserror", "s"}, + {"internal/pkgbits", "\x03Q\x18\a\x03\x04\fs\r\x1f\r\n\x01"}, {"internal/platform", ""}, - {"internal/poll", "qj\x05\x159\r\x01\x01\f\x06"}, - {"internal/profile", "\x03\x04j\x03\x81\x017\n\x01\x01\x01\x10"}, + {"internal/poll", "sl\x05\x159\r\x01\x01\f\x06"}, + {"internal/profile", "\x03\x04l\x03\x83\x017\n\x01\x01\x01\x10"}, {"internal/profilerecord", ""}, - {"internal/race", "\x97\x01\xb8\x01"}, - {"internal/reflectlite", "\x97\x01!:\x16"}, - {"vendor/golang.org/x/text/unicode/norm", "j\n\x81\x01F\x12\x11"}, - {"weak", "\x97\x01\x97\x01!"}, + {"vendor/golang.org/x/crypto/internal/alias", "\xd3\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "W\x15\x9c\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "s\xc7\x01"}, + {"vendor/golang.org/x/net/http/httpguts", "\x8f\x02\x14\x1a\x14\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "s\x03\x99\x01\x10\x05\x01\x18\x14\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03p\x03\x83\x01F"}, + {"vendor/golang.org/x/net/idna", "v\x8f\x018\x14\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03i\a\x03\x83\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, + {"vendor/golang.org/x/sys/cpu", "\xa5\x02\r\n\x01\x16"}, + {"vendor/golang.org/x/text/secure/bidirule", "s\xde\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03p\x86\x01X"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bk\x87\x01>\x16"}, + {"vendor/golang.org/x/text/unicode/norm", "l\n\x83\x01F\x12\x11"}, + {"weak", "\x9a\x01\x98\x01!"}, } // bootstrap is the list of bootstrap packages extracted from cmd/dist. @@ -385,6 +389,7 @@ var bootstrap = map[string]bool{ "cmd/compile/internal/arm64": true, "cmd/compile/internal/base": true, "cmd/compile/internal/bitvec": true, + "cmd/compile/internal/bloop": true, "cmd/compile/internal/compare": true, "cmd/compile/internal/coverage": true, "cmd/compile/internal/deadlocals": true, @@ -413,6 +418,7 @@ var bootstrap = map[string]bool{ "cmd/compile/internal/riscv64": true, "cmd/compile/internal/rttype": true, "cmd/compile/internal/s390x": true, + "cmd/compile/internal/slice": true, "cmd/compile/internal/ssa": true, "cmd/compile/internal/ssagen": true, "cmd/compile/internal/staticdata": true, diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index 362f23c43..f1e24625a 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -16,6 +16,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).Flush", Method, 0, ""}, {"(*Writer).Write", Method, 0, ""}, {"(*Writer).WriteHeader", Method, 0, ""}, + {"(FileInfoNames).Gname", Method, 23, ""}, + {"(FileInfoNames).IsDir", Method, 23, ""}, + {"(FileInfoNames).ModTime", Method, 23, ""}, + {"(FileInfoNames).Mode", Method, 23, ""}, + {"(FileInfoNames).Name", Method, 23, ""}, + {"(FileInfoNames).Size", Method, 23, ""}, + {"(FileInfoNames).Sys", Method, 23, ""}, + {"(FileInfoNames).Uname", Method, 23, ""}, {"(Format).String", Method, 10, ""}, {"ErrFieldTooLong", Var, 0, ""}, {"ErrHeader", Var, 0, ""}, @@ -338,6 +346,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).Write", Method, 0, ""}, {"(CorruptInputError).Error", Method, 0, ""}, {"(InternalError).Error", Method, 0, ""}, + {"(Reader).Read", Method, 0, ""}, + 
{"(Reader).ReadByte", Method, 0, ""}, + {"(Resetter).Reset", Method, 4, ""}, {"BestCompression", Const, 0, ""}, {"BestSpeed", Const, 0, ""}, {"CorruptInputError", Type, 0, ""}, @@ -409,6 +420,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).Flush", Method, 0, ""}, {"(*Writer).Reset", Method, 2, ""}, {"(*Writer).Write", Method, 0, ""}, + {"(Resetter).Reset", Method, 4, ""}, {"BestCompression", Const, 0, ""}, {"BestSpeed", Const, 0, ""}, {"DefaultCompression", Const, 0, ""}, @@ -426,6 +438,11 @@ var PackageSymbols = map[string][]Symbol{ {"Writer", Type, 0, ""}, }, "container/heap": { + {"(Interface).Len", Method, 0, ""}, + {"(Interface).Less", Method, 0, ""}, + {"(Interface).Pop", Method, 0, ""}, + {"(Interface).Push", Method, 0, ""}, + {"(Interface).Swap", Method, 0, ""}, {"Fix", Func, 2, "func(h Interface, i int)"}, {"Init", Func, 0, "func(h Interface)"}, {"Interface", Type, 0, ""}, @@ -469,6 +486,10 @@ var PackageSymbols = map[string][]Symbol{ {"Ring.Value", Field, 0, ""}, }, "context": { + {"(Context).Deadline", Method, 7, ""}, + {"(Context).Done", Method, 7, ""}, + {"(Context).Err", Method, 7, ""}, + {"(Context).Value", Method, 7, ""}, {"AfterFunc", Func, 21, "func(ctx Context, f func()) (stop func() bool)"}, {"Background", Func, 7, "func() Context"}, {"CancelCauseFunc", Type, 20, ""}, @@ -488,17 +509,31 @@ var PackageSymbols = map[string][]Symbol{ {"WithoutCancel", Func, 21, "func(parent Context) Context"}, }, "crypto": { + {"(Decapsulator).Decapsulate", Method, 26, ""}, + {"(Decapsulator).Encapsulator", Method, 26, ""}, + {"(Decrypter).Decrypt", Method, 5, ""}, + {"(Decrypter).Public", Method, 5, ""}, + {"(Encapsulator).Bytes", Method, 26, ""}, + {"(Encapsulator).Encapsulate", Method, 26, ""}, {"(Hash).Available", Method, 0, ""}, {"(Hash).HashFunc", Method, 4, ""}, {"(Hash).New", Method, 0, ""}, {"(Hash).Size", Method, 0, ""}, {"(Hash).String", Method, 15, ""}, + {"(MessageSigner).Public", Method, 25, ""}, + {"(MessageSigner).Sign", Method, 25, ""}, + {"(MessageSigner).SignMessage", Method, 25, ""}, + {"(Signer).Public", Method, 4, ""}, + {"(Signer).Sign", Method, 4, ""}, + {"(SignerOpts).HashFunc", Method, 4, ""}, {"BLAKE2b_256", Const, 9, ""}, {"BLAKE2b_384", Const, 9, ""}, {"BLAKE2b_512", Const, 9, ""}, {"BLAKE2s_256", Const, 9, ""}, + {"Decapsulator", Type, 26, ""}, {"Decrypter", Type, 5, ""}, {"DecrypterOpts", Type, 5, ""}, + {"Encapsulator", Type, 26, ""}, {"Hash", Type, 0, ""}, {"MD4", Const, 0, ""}, {"MD5", Const, 0, ""}, @@ -530,6 +565,16 @@ var PackageSymbols = map[string][]Symbol{ {"NewCipher", Func, 0, "func(key []byte) (cipher.Block, error)"}, }, "crypto/cipher": { + {"(AEAD).NonceSize", Method, 2, ""}, + {"(AEAD).Open", Method, 2, ""}, + {"(AEAD).Overhead", Method, 2, ""}, + {"(AEAD).Seal", Method, 2, ""}, + {"(Block).BlockSize", Method, 0, ""}, + {"(Block).Decrypt", Method, 0, ""}, + {"(Block).Encrypt", Method, 0, ""}, + {"(BlockMode).BlockSize", Method, 0, ""}, + {"(BlockMode).CryptBlocks", Method, 0, ""}, + {"(Stream).XORKeyStream", Method, 0, ""}, {"(StreamReader).Read", Method, 0, ""}, {"(StreamWriter).Close", Method, 0, ""}, {"(StreamWriter).Write", Method, 0, ""}, @@ -594,7 +639,13 @@ var PackageSymbols = map[string][]Symbol{ {"(*PublicKey).Bytes", Method, 20, ""}, {"(*PublicKey).Curve", Method, 20, ""}, {"(*PublicKey).Equal", Method, 20, ""}, - {"Curve", Type, 20, ""}, + {"(Curve).GenerateKey", Method, 20, ""}, + {"(Curve).NewPrivateKey", Method, 20, ""}, + {"(Curve).NewPublicKey", Method, 20, ""}, + {"(KeyExchanger).Curve", Method, 26, ""}, + 
{"(KeyExchanger).ECDH", Method, 26, ""}, + {"(KeyExchanger).PublicKey", Method, 26, ""}, + {"KeyExchanger", Type, 26, ""}, {"P256", Func, 20, "func() Curve"}, {"P384", Func, 20, "func() Curve"}, {"P521", Func, 20, "func() Curve"}, @@ -667,6 +718,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*CurveParams).Params", Method, 0, ""}, {"(*CurveParams).ScalarBaseMult", Method, 0, ""}, {"(*CurveParams).ScalarMult", Method, 0, ""}, + {"(Curve).Add", Method, 0, ""}, + {"(Curve).Double", Method, 0, ""}, + {"(Curve).IsOnCurve", Method, 0, ""}, + {"(Curve).Params", Method, 0, ""}, + {"(Curve).ScalarBaseMult", Method, 0, ""}, + {"(Curve).ScalarMult", Method, 0, ""}, {"Curve", Type, 0, ""}, {"CurveParams", Type, 0, ""}, {"CurveParams.B", Field, 0, ""}, @@ -688,6 +745,7 @@ var PackageSymbols = map[string][]Symbol{ }, "crypto/fips140": { {"Enabled", Func, 24, "func() bool"}, + {"Version", Func, 26, "func() string"}, }, "crypto/hkdf": { {"Expand", Func, 24, "func[H hash.Hash](h func() H, pseudorandomKey []byte, info string, keyLength int) ([]byte, error)"}, @@ -708,9 +766,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*DecapsulationKey1024).Bytes", Method, 24, ""}, {"(*DecapsulationKey1024).Decapsulate", Method, 24, ""}, {"(*DecapsulationKey1024).EncapsulationKey", Method, 24, ""}, + {"(*DecapsulationKey1024).Encapsulator", Method, 26, ""}, {"(*DecapsulationKey768).Bytes", Method, 24, ""}, {"(*DecapsulationKey768).Decapsulate", Method, 24, ""}, {"(*DecapsulationKey768).EncapsulationKey", Method, 24, ""}, + {"(*DecapsulationKey768).Encapsulator", Method, 26, ""}, {"(*EncapsulationKey1024).Bytes", Method, 24, ""}, {"(*EncapsulationKey1024).Encapsulate", Method, 24, ""}, {"(*EncapsulationKey768).Bytes", Method, 24, ""}, @@ -732,6 +792,10 @@ var PackageSymbols = map[string][]Symbol{ {"SeedSize", Const, 24, ""}, {"SharedKeySize", Const, 24, ""}, }, + "crypto/mlkem/mlkemtest": { + {"Encapsulate1024", Func, 26, "func(ek *mlkem.EncapsulationKey1024, random []byte) (sharedKey []byte, ciphertext []byte, err error)"}, + {"Encapsulate768", Func, 26, "func(ek *mlkem.EncapsulationKey768, random []byte) (sharedKey []byte, ciphertext []byte, err error)"}, + }, "crypto/pbkdf2": { {"Key", Func, 24, "func[Hash hash.Hash](h func() Hash, password string, salt []byte, iter int, keyLength int) ([]byte, error)"}, }, @@ -769,6 +833,7 @@ var PackageSymbols = map[string][]Symbol{ {"DecryptPKCS1v15", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte) ([]byte, error)"}, {"DecryptPKCS1v15SessionKey", Func, 0, "func(random io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) error"}, {"EncryptOAEP", Func, 0, "func(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) ([]byte, error)"}, + {"EncryptOAEPWithOptions", Func, 26, "func(random io.Reader, pub *PublicKey, msg []byte, opts *OAEPOptions) ([]byte, error)"}, {"EncryptPKCS1v15", Func, 0, "func(random io.Reader, pub *PublicKey, msg []byte) ([]byte, error)"}, {"ErrDecryption", Var, 0, ""}, {"ErrMessageTooLong", Var, 0, ""}, @@ -921,6 +986,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*SessionState).Bytes", Method, 21, ""}, {"(AlertError).Error", Method, 21, ""}, {"(ClientAuthType).String", Method, 15, ""}, + {"(ClientSessionCache).Get", Method, 3, ""}, + {"(ClientSessionCache).Put", Method, 3, ""}, {"(CurveID).String", Method, 15, ""}, {"(QUICEncryptionLevel).String", Method, 21, ""}, {"(RecordHeaderError).Error", Method, 6, ""}, @@ -953,6 +1020,7 @@ var PackageSymbols = map[string][]Symbol{ {"ClientHelloInfo.CipherSuites", 
Field, 4, ""}, {"ClientHelloInfo.Conn", Field, 8, ""}, {"ClientHelloInfo.Extensions", Field, 24, ""}, + {"ClientHelloInfo.HelloRetryRequest", Field, 26, ""}, {"ClientHelloInfo.ServerName", Field, 4, ""}, {"ClientHelloInfo.SignatureSchemes", Field, 8, ""}, {"ClientHelloInfo.SupportedCurves", Field, 4, ""}, @@ -1001,6 +1069,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConnectionState.DidResume", Field, 1, ""}, {"ConnectionState.ECHAccepted", Field, 23, ""}, {"ConnectionState.HandshakeComplete", Field, 0, ""}, + {"ConnectionState.HelloRetryRequest", Field, 26, ""}, {"ConnectionState.NegotiatedProtocol", Field, 0, ""}, {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0, ""}, {"ConnectionState.OCSPResponse", Field, 5, ""}, @@ -1055,8 +1124,10 @@ var PackageSymbols = map[string][]Symbol{ {"QUICEncryptionLevelEarly", Const, 21, ""}, {"QUICEncryptionLevelHandshake", Const, 21, ""}, {"QUICEncryptionLevelInitial", Const, 21, ""}, + {"QUICErrorEvent", Const, 26, ""}, {"QUICEvent", Type, 21, ""}, {"QUICEvent.Data", Field, 21, ""}, + {"QUICEvent.Err", Field, 26, ""}, {"QUICEvent.Kind", Field, 21, ""}, {"QUICEvent.Level", Field, 21, ""}, {"QUICEvent.SessionState", Field, 23, ""}, @@ -1151,8 +1222,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*RevocationList).CheckSignatureFrom", Method, 19, ""}, {"(CertificateInvalidError).Error", Method, 0, ""}, {"(ConstraintViolationError).Error", Method, 0, ""}, + {"(ExtKeyUsage).String", Method, 26, ""}, {"(HostnameError).Error", Method, 0, ""}, {"(InsecureAlgorithmError).Error", Method, 6, ""}, + {"(KeyUsage).String", Method, 26, ""}, {"(OID).AppendBinary", Method, 24, ""}, {"(OID).AppendText", Method, 24, ""}, {"(OID).Equal", Method, 22, ""}, @@ -1516,6 +1589,9 @@ var PackageSymbols = map[string][]Symbol{ {"(NullInt64).Value", Method, 0, ""}, {"(NullString).Value", Method, 0, ""}, {"(NullTime).Value", Method, 13, ""}, + {"(Result).LastInsertId", Method, 0, ""}, + {"(Result).RowsAffected", Method, 0, ""}, + {"(Scanner).Scan", Method, 0, ""}, {"ColumnType", Type, 8, ""}, {"Conn", Type, 9, ""}, {"DB", Type, 0, ""}, @@ -1547,8 +1623,6 @@ var PackageSymbols = map[string][]Symbol{ {"NamedArg.Name", Field, 8, ""}, {"NamedArg.Value", Field, 8, ""}, {"Null", Type, 22, ""}, - {"Null.V", Field, 22, ""}, - {"Null.Valid", Field, 22, ""}, {"NullBool", Type, 0, ""}, {"NullBool.Bool", Field, 0, ""}, {"NullBool.Valid", Field, 0, ""}, @@ -1591,10 +1665,72 @@ var PackageSymbols = map[string][]Symbol{ {"TxOptions.ReadOnly", Field, 8, ""}, }, "database/sql/driver": { + {"(ColumnConverter).ColumnConverter", Method, 0, ""}, + {"(Conn).Begin", Method, 0, ""}, + {"(Conn).Close", Method, 0, ""}, + {"(Conn).Prepare", Method, 0, ""}, + {"(ConnBeginTx).BeginTx", Method, 8, ""}, + {"(ConnPrepareContext).PrepareContext", Method, 8, ""}, + {"(Connector).Connect", Method, 10, ""}, + {"(Connector).Driver", Method, 10, ""}, + {"(Driver).Open", Method, 0, ""}, + {"(DriverContext).OpenConnector", Method, 10, ""}, + {"(Execer).Exec", Method, 0, ""}, + {"(ExecerContext).ExecContext", Method, 8, ""}, + {"(NamedValueChecker).CheckNamedValue", Method, 9, ""}, {"(NotNull).ConvertValue", Method, 0, ""}, {"(Null).ConvertValue", Method, 0, ""}, + {"(Pinger).Ping", Method, 8, ""}, + {"(Queryer).Query", Method, 1, ""}, + {"(QueryerContext).QueryContext", Method, 8, ""}, + {"(Result).LastInsertId", Method, 0, ""}, + {"(Result).RowsAffected", Method, 0, ""}, + {"(Rows).Close", Method, 0, ""}, + {"(Rows).Columns", Method, 0, ""}, + {"(Rows).Next", Method, 0, ""}, {"(RowsAffected).LastInsertId", 
Method, 0, ""}, {"(RowsAffected).RowsAffected", Method, 0, ""}, + {"(RowsColumnScanner).Close", Method, 26, ""}, + {"(RowsColumnScanner).Columns", Method, 26, ""}, + {"(RowsColumnScanner).Next", Method, 26, ""}, + {"(RowsColumnScanner).ScanColumn", Method, 26, ""}, + {"(RowsColumnTypeDatabaseTypeName).Close", Method, 8, ""}, + {"(RowsColumnTypeDatabaseTypeName).ColumnTypeDatabaseTypeName", Method, 8, ""}, + {"(RowsColumnTypeDatabaseTypeName).Columns", Method, 8, ""}, + {"(RowsColumnTypeDatabaseTypeName).Next", Method, 8, ""}, + {"(RowsColumnTypeLength).Close", Method, 8, ""}, + {"(RowsColumnTypeLength).ColumnTypeLength", Method, 8, ""}, + {"(RowsColumnTypeLength).Columns", Method, 8, ""}, + {"(RowsColumnTypeLength).Next", Method, 8, ""}, + {"(RowsColumnTypeNullable).Close", Method, 8, ""}, + {"(RowsColumnTypeNullable).ColumnTypeNullable", Method, 8, ""}, + {"(RowsColumnTypeNullable).Columns", Method, 8, ""}, + {"(RowsColumnTypeNullable).Next", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).Close", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).ColumnTypePrecisionScale", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).Columns", Method, 8, ""}, + {"(RowsColumnTypePrecisionScale).Next", Method, 8, ""}, + {"(RowsColumnTypeScanType).Close", Method, 8, ""}, + {"(RowsColumnTypeScanType).ColumnTypeScanType", Method, 8, ""}, + {"(RowsColumnTypeScanType).Columns", Method, 8, ""}, + {"(RowsColumnTypeScanType).Next", Method, 8, ""}, + {"(RowsNextResultSet).Close", Method, 8, ""}, + {"(RowsNextResultSet).Columns", Method, 8, ""}, + {"(RowsNextResultSet).HasNextResultSet", Method, 8, ""}, + {"(RowsNextResultSet).Next", Method, 8, ""}, + {"(RowsNextResultSet).NextResultSet", Method, 8, ""}, + {"(SessionResetter).ResetSession", Method, 10, ""}, + {"(Stmt).Close", Method, 0, ""}, + {"(Stmt).Exec", Method, 0, ""}, + {"(Stmt).NumInput", Method, 0, ""}, + {"(Stmt).Query", Method, 0, ""}, + {"(StmtExecContext).ExecContext", Method, 8, ""}, + {"(StmtQueryContext).QueryContext", Method, 8, ""}, + {"(Tx).Commit", Method, 0, ""}, + {"(Tx).Rollback", Method, 0, ""}, + {"(Validator).IsValid", Method, 15, ""}, + {"(ValueConverter).ConvertValue", Method, 0, ""}, + {"(Valuer).Value", Method, 0, ""}, {"Bool", Var, 0, ""}, {"ColumnConverter", Type, 0, ""}, {"Conn", Type, 0, ""}, @@ -1756,6 +1892,9 @@ var PackageSymbols = map[string][]Symbol{ {"(DecodeError).Error", Method, 0, ""}, {"(Tag).GoString", Method, 0, ""}, {"(Tag).String", Method, 0, ""}, + {"(Type).Common", Method, 0, ""}, + {"(Type).Size", Method, 0, ""}, + {"(Type).String", Method, 0, ""}, {"AddrType", Type, 0, ""}, {"AddrType.BasicType", Field, 0, ""}, {"ArrayType", Type, 0, ""}, @@ -3163,6 +3302,7 @@ var PackageSymbols = map[string][]Symbol{ {"R_LARCH_B16", Const, 20, ""}, {"R_LARCH_B21", Const, 20, ""}, {"R_LARCH_B26", Const, 20, ""}, + {"R_LARCH_CALL36", Const, 26, ""}, {"R_LARCH_CFA", Const, 22, ""}, {"R_LARCH_COPY", Const, 19, ""}, {"R_LARCH_DELETE", Const, 22, ""}, @@ -3220,11 +3360,25 @@ var PackageSymbols = map[string][]Symbol{ {"R_LARCH_SUB64", Const, 19, ""}, {"R_LARCH_SUB8", Const, 19, ""}, {"R_LARCH_SUB_ULEB128", Const, 22, ""}, + {"R_LARCH_TLS_DESC32", Const, 26, ""}, + {"R_LARCH_TLS_DESC64", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_HI12", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_LO20", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_PC_HI12", Const, 26, ""}, + {"R_LARCH_TLS_DESC64_PC_LO20", Const, 26, ""}, + {"R_LARCH_TLS_DESC_CALL", Const, 26, ""}, + {"R_LARCH_TLS_DESC_HI20", Const, 26, ""}, + {"R_LARCH_TLS_DESC_LD", Const, 26, ""}, + 
{"R_LARCH_TLS_DESC_LO12", Const, 26, ""}, + {"R_LARCH_TLS_DESC_PCREL20_S2", Const, 26, ""}, + {"R_LARCH_TLS_DESC_PC_HI20", Const, 26, ""}, + {"R_LARCH_TLS_DESC_PC_LO12", Const, 26, ""}, {"R_LARCH_TLS_DTPMOD32", Const, 19, ""}, {"R_LARCH_TLS_DTPMOD64", Const, 19, ""}, {"R_LARCH_TLS_DTPREL32", Const, 19, ""}, {"R_LARCH_TLS_DTPREL64", Const, 19, ""}, {"R_LARCH_TLS_GD_HI20", Const, 20, ""}, + {"R_LARCH_TLS_GD_PCREL20_S2", Const, 26, ""}, {"R_LARCH_TLS_GD_PC_HI20", Const, 20, ""}, {"R_LARCH_TLS_IE64_HI12", Const, 20, ""}, {"R_LARCH_TLS_IE64_LO20", Const, 20, ""}, @@ -3235,11 +3389,15 @@ var PackageSymbols = map[string][]Symbol{ {"R_LARCH_TLS_IE_PC_HI20", Const, 20, ""}, {"R_LARCH_TLS_IE_PC_LO12", Const, 20, ""}, {"R_LARCH_TLS_LD_HI20", Const, 20, ""}, + {"R_LARCH_TLS_LD_PCREL20_S2", Const, 26, ""}, {"R_LARCH_TLS_LD_PC_HI20", Const, 20, ""}, {"R_LARCH_TLS_LE64_HI12", Const, 20, ""}, {"R_LARCH_TLS_LE64_LO20", Const, 20, ""}, + {"R_LARCH_TLS_LE_ADD_R", Const, 26, ""}, {"R_LARCH_TLS_LE_HI20", Const, 20, ""}, + {"R_LARCH_TLS_LE_HI20_R", Const, 26, ""}, {"R_LARCH_TLS_LE_LO12", Const, 20, ""}, + {"R_LARCH_TLS_LE_LO12_R", Const, 26, ""}, {"R_LARCH_TLS_TPREL32", Const, 19, ""}, {"R_LARCH_TLS_TPREL64", Const, 19, ""}, {"R_MIPS", Type, 6, ""}, @@ -3944,6 +4102,7 @@ var PackageSymbols = map[string][]Symbol{ {"(FatArch).ImportedSymbols", Method, 3, ""}, {"(FatArch).Section", Method, 3, ""}, {"(FatArch).Segment", Method, 3, ""}, + {"(Load).Raw", Method, 0, ""}, {"(LoadBytes).Raw", Method, 0, ""}, {"(LoadCmd).GoString", Method, 0, ""}, {"(LoadCmd).String", Method, 0, ""}, @@ -4590,6 +4749,12 @@ var PackageSymbols = map[string][]Symbol{ {"FS", Type, 16, ""}, }, "encoding": { + {"(BinaryAppender).AppendBinary", Method, 24, ""}, + {"(BinaryMarshaler).MarshalBinary", Method, 2, ""}, + {"(BinaryUnmarshaler).UnmarshalBinary", Method, 2, ""}, + {"(TextAppender).AppendText", Method, 24, ""}, + {"(TextMarshaler).MarshalText", Method, 2, ""}, + {"(TextUnmarshaler).UnmarshalText", Method, 2, ""}, {"BinaryAppender", Type, 24, ""}, {"BinaryMarshaler", Type, 2, ""}, {"BinaryUnmarshaler", Type, 2, ""}, @@ -4705,6 +4870,17 @@ var PackageSymbols = map[string][]Symbol{ {"URLEncoding", Var, 0, ""}, }, "encoding/binary": { + {"(AppendByteOrder).AppendUint16", Method, 19, ""}, + {"(AppendByteOrder).AppendUint32", Method, 19, ""}, + {"(AppendByteOrder).AppendUint64", Method, 19, ""}, + {"(AppendByteOrder).String", Method, 19, ""}, + {"(ByteOrder).PutUint16", Method, 0, ""}, + {"(ByteOrder).PutUint32", Method, 0, ""}, + {"(ByteOrder).PutUint64", Method, 0, ""}, + {"(ByteOrder).String", Method, 0, ""}, + {"(ByteOrder).Uint16", Method, 0, ""}, + {"(ByteOrder).Uint32", Method, 0, ""}, + {"(ByteOrder).Uint64", Method, 0, ""}, {"Append", Func, 23, "func(buf []byte, order ByteOrder, data any) ([]byte, error)"}, {"AppendByteOrder", Type, 19, ""}, {"AppendUvarint", Func, 19, "func(buf []byte, x uint64) []byte"}, @@ -4767,6 +4943,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*Decoder).DecodeValue", Method, 0, ""}, {"(*Encoder).Encode", Method, 0, ""}, {"(*Encoder).EncodeValue", Method, 0, ""}, + {"(GobDecoder).GobDecode", Method, 0, ""}, + {"(GobEncoder).GobEncode", Method, 0, ""}, {"CommonType", Type, 0, ""}, {"CommonType.Id", Field, 0, ""}, {"CommonType.Name", Field, 0, ""}, @@ -4819,10 +4997,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnsupportedTypeError).Error", Method, 0, ""}, {"(*UnsupportedValueError).Error", Method, 0, ""}, {"(Delim).String", Method, 5, ""}, + {"(Marshaler).MarshalJSON", Method, 0, ""}, 
{"(Number).Float64", Method, 1, ""}, {"(Number).Int64", Method, 1, ""}, {"(Number).String", Method, 1, ""}, {"(RawMessage).MarshalJSON", Method, 8, ""}, + {"(Unmarshaler).UnmarshalJSON", Method, 0, ""}, {"Compact", Func, 0, "func(dst *bytes.Buffer, src []byte) error"}, {"Decoder", Type, 0, ""}, {"Delim", Type, 5, ""}, @@ -4894,10 +5074,15 @@ var PackageSymbols = map[string][]Symbol{ {"(CharData).Copy", Method, 0, ""}, {"(Comment).Copy", Method, 0, ""}, {"(Directive).Copy", Method, 0, ""}, + {"(Marshaler).MarshalXML", Method, 2, ""}, + {"(MarshalerAttr).MarshalXMLAttr", Method, 2, ""}, {"(ProcInst).Copy", Method, 0, ""}, {"(StartElement).Copy", Method, 0, ""}, {"(StartElement).End", Method, 2, ""}, + {"(TokenReader).Token", Method, 10, ""}, {"(UnmarshalError).Error", Method, 0, ""}, + {"(Unmarshaler).UnmarshalXML", Method, 2, ""}, + {"(UnmarshalerAttr).UnmarshalXMLAttr", Method, 2, ""}, {"Attr", Type, 0, ""}, {"Attr.Name", Field, 0, ""}, {"Attr.Value", Field, 0, ""}, @@ -4984,6 +5169,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*String).Value", Method, 8, ""}, {"(Func).String", Method, 0, ""}, {"(Func).Value", Method, 8, ""}, + {"(Var).String", Method, 0, ""}, {"Do", Func, 0, "func(f func(KeyValue))"}, {"Float", Type, 0, ""}, {"Func", Type, 0, ""}, @@ -5039,6 +5225,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*FlagSet).Var", Method, 0, ""}, {"(*FlagSet).Visit", Method, 0, ""}, {"(*FlagSet).VisitAll", Method, 0, ""}, + {"(Getter).Get", Method, 2, ""}, + {"(Getter).Set", Method, 2, ""}, + {"(Getter).String", Method, 2, ""}, + {"(Value).Set", Method, 0, ""}, + {"(Value).String", Method, 0, ""}, {"Arg", Func, 0, "func(i int) string"}, {"Args", Func, 0, "func() []string"}, {"Bool", Func, 0, "func(name string, value bool, usage string) *bool"}, @@ -5090,6 +5281,20 @@ var PackageSymbols = map[string][]Symbol{ {"VisitAll", Func, 0, "func(fn func(*Flag))"}, }, "fmt": { + {"(Formatter).Format", Method, 0, ""}, + {"(GoStringer).GoString", Method, 0, ""}, + {"(ScanState).Read", Method, 0, ""}, + {"(ScanState).ReadRune", Method, 0, ""}, + {"(ScanState).SkipSpace", Method, 0, ""}, + {"(ScanState).Token", Method, 0, ""}, + {"(ScanState).UnreadRune", Method, 0, ""}, + {"(ScanState).Width", Method, 0, ""}, + {"(Scanner).Scan", Method, 0, ""}, + {"(State).Flag", Method, 0, ""}, + {"(State).Precision", Method, 0, ""}, + {"(State).Width", Method, 0, ""}, + {"(State).Write", Method, 0, ""}, + {"(Stringer).String", Method, 0, ""}, {"Append", Func, 19, "func(b []byte, a ...any) []byte"}, {"Appendf", Func, 19, "func(b []byte, format string, a ...any) []byte"}, {"Appendln", Func, 19, "func(b []byte, a ...any) []byte"}, @@ -5248,7 +5453,18 @@ var PackageSymbols = map[string][]Symbol{ {"(CommentMap).Filter", Method, 1, ""}, {"(CommentMap).String", Method, 1, ""}, {"(CommentMap).Update", Method, 1, ""}, + {"(Decl).End", Method, 0, ""}, + {"(Decl).Pos", Method, 0, ""}, + {"(Expr).End", Method, 0, ""}, + {"(Expr).Pos", Method, 0, ""}, + {"(Node).End", Method, 0, ""}, + {"(Node).Pos", Method, 0, ""}, {"(ObjKind).String", Method, 0, ""}, + {"(Spec).End", Method, 0, ""}, + {"(Spec).Pos", Method, 0, ""}, + {"(Stmt).End", Method, 0, ""}, + {"(Stmt).Pos", Method, 0, ""}, + {"(Visitor).Visit", Method, 0, ""}, {"ArrayType", Type, 0, ""}, {"ArrayType.Elt", Field, 0, ""}, {"ArrayType.Lbrack", Field, 0, ""}, @@ -5271,6 +5487,7 @@ var PackageSymbols = map[string][]Symbol{ {"BasicLit", Type, 0, ""}, {"BasicLit.Kind", Field, 0, ""}, {"BasicLit.Value", Field, 0, ""}, + {"BasicLit.ValueEnd", Field, 26, ""}, 
{"BasicLit.ValuePos", Field, 0, ""}, {"BinaryExpr", Type, 0, ""}, {"BinaryExpr.Op", Field, 0, ""}, @@ -5320,7 +5537,6 @@ var PackageSymbols = map[string][]Symbol{ {"CompositeLit.Rbrace", Field, 0, ""}, {"CompositeLit.Type", Field, 0, ""}, {"Con", Const, 0, ""}, - {"Decl", Type, 0, ""}, {"DeclStmt", Type, 0, ""}, {"DeclStmt.Decl", Field, 0, ""}, {"DeferStmt", Type, 0, ""}, @@ -5341,7 +5557,6 @@ var PackageSymbols = map[string][]Symbol{ {"EmptyStmt", Type, 0, ""}, {"EmptyStmt.Implicit", Field, 5, ""}, {"EmptyStmt.Semicolon", Field, 0, ""}, - {"Expr", Type, 0, ""}, {"ExprStmt", Type, 0, ""}, {"ExprStmt.X", Field, 0, ""}, {"Field", Type, 0, ""}, @@ -5525,11 +5740,9 @@ var PackageSymbols = map[string][]Symbol{ {"SliceExpr.Slice3", Field, 2, ""}, {"SliceExpr.X", Field, 0, ""}, {"SortImports", Func, 0, "func(fset *token.FileSet, f *File)"}, - {"Spec", Type, 0, ""}, {"StarExpr", Type, 0, ""}, {"StarExpr.Star", Field, 0, ""}, {"StarExpr.X", Field, 0, ""}, - {"Stmt", Type, 0, ""}, {"StructType", Type, 0, ""}, {"StructType.Fields", Field, 0, ""}, {"StructType.Incomplete", Field, 0, ""}, @@ -5684,10 +5897,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*SyntaxError).Error", Method, 16, ""}, {"(*TagExpr).Eval", Method, 16, ""}, {"(*TagExpr).String", Method, 16, ""}, + {"(Expr).Eval", Method, 16, ""}, + {"(Expr).String", Method, 16, ""}, {"AndExpr", Type, 16, ""}, {"AndExpr.X", Field, 16, ""}, {"AndExpr.Y", Field, 16, ""}, - {"Expr", Type, 16, ""}, {"GoVersion", Func, 21, "func(x Expr) string"}, {"IsGoBuild", Func, 16, "func(line string) bool"}, {"IsPlusBuild", Func, 16, "func(line string) bool"}, @@ -5706,6 +5920,9 @@ var PackageSymbols = map[string][]Symbol{ }, "go/constant": { {"(Kind).String", Method, 18, ""}, + {"(Value).ExactString", Method, 6, ""}, + {"(Value).Kind", Method, 5, ""}, + {"(Value).String", Method, 5, ""}, {"BinaryOp", Func, 5, "func(x_ Value, op token.Token, y_ Value) Value"}, {"BitLen", Func, 5, "func(x Value) int"}, {"Bool", Const, 5, ""}, @@ -5744,7 +5961,6 @@ var PackageSymbols = map[string][]Symbol{ {"UnaryOp", Func, 5, "func(op token.Token, y Value, prec uint) Value"}, {"Unknown", Const, 5, ""}, {"Val", Func, 13, "func(x Value) any"}, - {"Value", Type, 5, ""}, }, "go/doc": { {"(*Package).Filter", Method, 0, ""}, @@ -5828,7 +6044,6 @@ var PackageSymbols = map[string][]Symbol{ {"(*Printer).HTML", Method, 19, ""}, {"(*Printer).Markdown", Method, 19, ""}, {"(*Printer).Text", Method, 19, ""}, - {"Block", Type, 19, ""}, {"Code", Type, 19, ""}, {"Code.Text", Field, 19, ""}, {"DefaultLookupPackage", Func, 19, "func(name string) (importPath string, ok bool)"}, @@ -5873,7 +6088,6 @@ var PackageSymbols = map[string][]Symbol{ {"Printer.TextCodePrefix", Field, 19, ""}, {"Printer.TextPrefix", Field, 19, ""}, {"Printer.TextWidth", Field, 19, ""}, - {"Text", Type, 19, ""}, }, "go/format": { {"Node", Func, 1, "func(dst io.Writer, fset *token.FileSet, node any) error"}, @@ -5945,6 +6159,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*File).AddLineColumnInfo", Method, 11, ""}, {"(*File).AddLineInfo", Method, 0, ""}, {"(*File).Base", Method, 0, ""}, + {"(*File).End", Method, 26, ""}, {"(*File).Line", Method, 0, ""}, {"(*File).LineCount", Method, 0, ""}, {"(*File).LineStart", Method, 12, ""}, @@ -6307,6 +6522,22 @@ var PackageSymbols = map[string][]Symbol{ {"(Checker).PkgNameOf", Method, 22, ""}, {"(Checker).TypeOf", Method, 5, ""}, {"(Error).Error", Method, 5, ""}, + {"(Importer).Import", Method, 5, ""}, + {"(ImporterFrom).Import", Method, 6, ""}, + {"(ImporterFrom).ImportFrom", Method, 6, 
""}, + {"(Object).Exported", Method, 5, ""}, + {"(Object).Id", Method, 5, ""}, + {"(Object).Name", Method, 5, ""}, + {"(Object).Parent", Method, 5, ""}, + {"(Object).Pkg", Method, 5, ""}, + {"(Object).Pos", Method, 5, ""}, + {"(Object).String", Method, 5, ""}, + {"(Object).Type", Method, 5, ""}, + {"(Sizes).Alignof", Method, 5, ""}, + {"(Sizes).Offsetsof", Method, 5, ""}, + {"(Sizes).Sizeof", Method, 5, ""}, + {"(Type).String", Method, 5, ""}, + {"(Type).Underlying", Method, 5, ""}, {"(TypeAndValue).Addressable", Method, 5, ""}, {"(TypeAndValue).Assignable", Method, 5, ""}, {"(TypeAndValue).HasOk", Method, 5, ""}, @@ -6445,7 +6676,6 @@ var PackageSymbols = map[string][]Symbol{ {"NewUnion", Func, 18, "func(terms []*Term) *Union"}, {"NewVar", Func, 5, "func(pos token.Pos, pkg *Package, name string, typ Type) *Var"}, {"Nil", Type, 5, ""}, - {"Object", Type, 5, ""}, {"ObjectString", Func, 5, "func(obj Object, qf Qualifier) string"}, {"Package", Type, 5, ""}, {"PackageVar", Const, 25, ""}, @@ -6516,6 +6746,33 @@ var PackageSymbols = map[string][]Symbol{ {"Lang", Func, 22, "func(x string) string"}, }, "hash": { + {"(Cloner).BlockSize", Method, 25, ""}, + {"(Cloner).Clone", Method, 25, ""}, + {"(Cloner).Reset", Method, 25, ""}, + {"(Cloner).Size", Method, 25, ""}, + {"(Cloner).Sum", Method, 25, ""}, + {"(Cloner).Write", Method, 25, ""}, + {"(Hash).BlockSize", Method, 0, ""}, + {"(Hash).Reset", Method, 0, ""}, + {"(Hash).Size", Method, 0, ""}, + {"(Hash).Sum", Method, 0, ""}, + {"(Hash).Write", Method, 0, ""}, + {"(Hash32).BlockSize", Method, 0, ""}, + {"(Hash32).Reset", Method, 0, ""}, + {"(Hash32).Size", Method, 0, ""}, + {"(Hash32).Sum", Method, 0, ""}, + {"(Hash32).Sum32", Method, 0, ""}, + {"(Hash32).Write", Method, 0, ""}, + {"(Hash64).BlockSize", Method, 0, ""}, + {"(Hash64).Reset", Method, 0, ""}, + {"(Hash64).Size", Method, 0, ""}, + {"(Hash64).Sum", Method, 0, ""}, + {"(Hash64).Sum64", Method, 0, ""}, + {"(Hash64).Write", Method, 0, ""}, + {"(XOF).BlockSize", Method, 25, ""}, + {"(XOF).Read", Method, 25, ""}, + {"(XOF).Reset", Method, 25, ""}, + {"(XOF).Write", Method, 25, ""}, {"Cloner", Type, 25, ""}, {"Hash", Type, 0, ""}, {"Hash32", Type, 0, ""}, @@ -6781,6 +7038,13 @@ var PackageSymbols = map[string][]Symbol{ {"(*YCbCr).SubImage", Method, 0, ""}, {"(*YCbCr).YCbCrAt", Method, 4, ""}, {"(*YCbCr).YOffset", Method, 0, ""}, + {"(Image).At", Method, 0, ""}, + {"(Image).Bounds", Method, 0, ""}, + {"(Image).ColorModel", Method, 0, ""}, + {"(PalettedImage).At", Method, 0, ""}, + {"(PalettedImage).Bounds", Method, 0, ""}, + {"(PalettedImage).ColorIndexAt", Method, 0, ""}, + {"(PalettedImage).ColorModel", Method, 0, ""}, {"(Point).Add", Method, 0, ""}, {"(Point).Div", Method, 0, ""}, {"(Point).Eq", Method, 0, ""}, @@ -6789,6 +7053,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Point).Mul", Method, 0, ""}, {"(Point).String", Method, 0, ""}, {"(Point).Sub", Method, 0, ""}, + {"(RGBA64Image).At", Method, 17, ""}, + {"(RGBA64Image).Bounds", Method, 17, ""}, + {"(RGBA64Image).ColorModel", Method, 17, ""}, + {"(RGBA64Image).RGBA64At", Method, 17, ""}, {"(Rectangle).Add", Method, 0, ""}, {"(Rectangle).At", Method, 5, ""}, {"(Rectangle).Bounds", Method, 5, ""}, @@ -6913,8 +7181,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Alpha).RGBA", Method, 0, ""}, {"(Alpha16).RGBA", Method, 0, ""}, {"(CMYK).RGBA", Method, 5, ""}, + {"(Color).RGBA", Method, 0, ""}, {"(Gray).RGBA", Method, 0, ""}, {"(Gray16).RGBA", Method, 0, ""}, + {"(Model).Convert", Method, 0, ""}, {"(NRGBA).RGBA", Method, 0, ""}, 
{"(NRGBA64).RGBA", Method, 0, ""}, {"(NYCbCrA).RGBA", Method, 6, ""}, @@ -6992,7 +7262,19 @@ var PackageSymbols = map[string][]Symbol{ {"WebSafe", Var, 2, ""}, }, "image/draw": { + {"(Drawer).Draw", Method, 2, ""}, + {"(Image).At", Method, 0, ""}, + {"(Image).Bounds", Method, 0, ""}, + {"(Image).ColorModel", Method, 0, ""}, + {"(Image).Set", Method, 0, ""}, {"(Op).Draw", Method, 2, ""}, + {"(Quantizer).Quantize", Method, 2, ""}, + {"(RGBA64Image).At", Method, 17, ""}, + {"(RGBA64Image).Bounds", Method, 17, ""}, + {"(RGBA64Image).ColorModel", Method, 17, ""}, + {"(RGBA64Image).RGBA64At", Method, 17, ""}, + {"(RGBA64Image).Set", Method, 17, ""}, + {"(RGBA64Image).SetRGBA64", Method, 17, ""}, {"Draw", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, op Op)"}, {"DrawMask", Func, 0, "func(dst Image, r image.Rectangle, src image.Image, sp image.Point, mask image.Image, mp image.Point, op Op)"}, {"Drawer", Type, 2, ""}, @@ -7027,6 +7309,8 @@ var PackageSymbols = map[string][]Symbol{ }, "image/jpeg": { {"(FormatError).Error", Method, 0, ""}, + {"(Reader).Read", Method, 0, ""}, + {"(Reader).ReadByte", Method, 0, ""}, {"(UnsupportedError).Error", Method, 0, ""}, {"Decode", Func, 0, "func(r io.Reader) (image.Image, error)"}, {"DecodeConfig", Func, 0, "func(r io.Reader) (image.Config, error)"}, @@ -7040,6 +7324,8 @@ var PackageSymbols = map[string][]Symbol{ }, "image/png": { {"(*Encoder).Encode", Method, 4, ""}, + {"(EncoderBufferPool).Get", Method, 9, ""}, + {"(EncoderBufferPool).Put", Method, 9, ""}, {"(FormatError).Error", Method, 0, ""}, {"(UnsupportedError).Error", Method, 0, ""}, {"BestCompression", Const, 4, ""}, @@ -7083,6 +7369,41 @@ var PackageSymbols = map[string][]Symbol{ {"(*SectionReader).ReadAt", Method, 0, ""}, {"(*SectionReader).Seek", Method, 0, ""}, {"(*SectionReader).Size", Method, 0, ""}, + {"(ByteReader).ReadByte", Method, 0, ""}, + {"(ByteScanner).ReadByte", Method, 0, ""}, + {"(ByteScanner).UnreadByte", Method, 0, ""}, + {"(ByteWriter).WriteByte", Method, 1, ""}, + {"(Closer).Close", Method, 0, ""}, + {"(ReadCloser).Close", Method, 0, ""}, + {"(ReadCloser).Read", Method, 0, ""}, + {"(ReadSeekCloser).Close", Method, 16, ""}, + {"(ReadSeekCloser).Read", Method, 16, ""}, + {"(ReadSeekCloser).Seek", Method, 16, ""}, + {"(ReadSeeker).Read", Method, 0, ""}, + {"(ReadSeeker).Seek", Method, 0, ""}, + {"(ReadWriteCloser).Close", Method, 0, ""}, + {"(ReadWriteCloser).Read", Method, 0, ""}, + {"(ReadWriteCloser).Write", Method, 0, ""}, + {"(ReadWriteSeeker).Read", Method, 0, ""}, + {"(ReadWriteSeeker).Seek", Method, 0, ""}, + {"(ReadWriteSeeker).Write", Method, 0, ""}, + {"(ReadWriter).Read", Method, 0, ""}, + {"(ReadWriter).Write", Method, 0, ""}, + {"(Reader).Read", Method, 0, ""}, + {"(ReaderAt).ReadAt", Method, 0, ""}, + {"(ReaderFrom).ReadFrom", Method, 0, ""}, + {"(RuneReader).ReadRune", Method, 0, ""}, + {"(RuneScanner).ReadRune", Method, 0, ""}, + {"(RuneScanner).UnreadRune", Method, 0, ""}, + {"(Seeker).Seek", Method, 0, ""}, + {"(StringWriter).WriteString", Method, 12, ""}, + {"(WriteCloser).Close", Method, 0, ""}, + {"(WriteCloser).Write", Method, 0, ""}, + {"(WriteSeeker).Seek", Method, 0, ""}, + {"(WriteSeeker).Write", Method, 0, ""}, + {"(Writer).Write", Method, 0, ""}, + {"(WriterAt).WriteAt", Method, 0, ""}, + {"(WriterTo).WriteTo", Method, 0, ""}, {"ByteReader", Type, 0, ""}, {"ByteScanner", Type, 0, ""}, {"ByteWriter", Type, 1, ""}, @@ -7142,11 +7463,42 @@ var PackageSymbols = map[string][]Symbol{ {"(*PathError).Error", Method, 16, ""}, 
{"(*PathError).Timeout", Method, 16, ""}, {"(*PathError).Unwrap", Method, 16, ""}, + {"(DirEntry).Info", Method, 16, ""}, + {"(DirEntry).IsDir", Method, 16, ""}, + {"(DirEntry).Name", Method, 16, ""}, + {"(DirEntry).Type", Method, 16, ""}, + {"(FS).Open", Method, 16, ""}, + {"(File).Close", Method, 16, ""}, + {"(File).Read", Method, 16, ""}, + {"(File).Stat", Method, 16, ""}, + {"(FileInfo).IsDir", Method, 16, ""}, + {"(FileInfo).ModTime", Method, 16, ""}, + {"(FileInfo).Mode", Method, 16, ""}, + {"(FileInfo).Name", Method, 16, ""}, + {"(FileInfo).Size", Method, 16, ""}, + {"(FileInfo).Sys", Method, 16, ""}, {"(FileMode).IsDir", Method, 16, ""}, {"(FileMode).IsRegular", Method, 16, ""}, {"(FileMode).Perm", Method, 16, ""}, {"(FileMode).String", Method, 16, ""}, {"(FileMode).Type", Method, 16, ""}, + {"(GlobFS).Glob", Method, 16, ""}, + {"(GlobFS).Open", Method, 16, ""}, + {"(ReadDirFS).Open", Method, 16, ""}, + {"(ReadDirFS).ReadDir", Method, 16, ""}, + {"(ReadDirFile).Close", Method, 16, ""}, + {"(ReadDirFile).Read", Method, 16, ""}, + {"(ReadDirFile).ReadDir", Method, 16, ""}, + {"(ReadDirFile).Stat", Method, 16, ""}, + {"(ReadFileFS).Open", Method, 16, ""}, + {"(ReadFileFS).ReadFile", Method, 16, ""}, + {"(ReadLinkFS).Lstat", Method, 25, ""}, + {"(ReadLinkFS).Open", Method, 25, ""}, + {"(ReadLinkFS).ReadLink", Method, 25, ""}, + {"(StatFS).Open", Method, 16, ""}, + {"(StatFS).Stat", Method, 16, ""}, + {"(SubFS).Open", Method, 16, ""}, + {"(SubFS).Sub", Method, 16, ""}, {"DirEntry", Type, 16, ""}, {"ErrClosed", Var, 16, ""}, {"ErrExist", Var, 16, ""}, @@ -7299,12 +7651,18 @@ var PackageSymbols = map[string][]Symbol{ {"(*TextHandler).WithGroup", Method, 21, ""}, {"(Attr).Equal", Method, 21, ""}, {"(Attr).String", Method, 21, ""}, + {"(Handler).Enabled", Method, 21, ""}, + {"(Handler).Handle", Method, 21, ""}, + {"(Handler).WithAttrs", Method, 21, ""}, + {"(Handler).WithGroup", Method, 21, ""}, {"(Kind).String", Method, 21, ""}, {"(Level).AppendText", Method, 24, ""}, {"(Level).Level", Method, 21, ""}, {"(Level).MarshalJSON", Method, 21, ""}, {"(Level).MarshalText", Method, 21, ""}, {"(Level).String", Method, 21, ""}, + {"(Leveler).Level", Method, 21, ""}, + {"(LogValuer).LogValue", Method, 21, ""}, {"(Record).Attrs", Method, 21, ""}, {"(Record).Clone", Method, 21, ""}, {"(Record).NumAttrs", Method, 21, ""}, @@ -7833,6 +8191,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*Rand).Uint32", Method, 0, ""}, {"(*Rand).Uint64", Method, 8, ""}, {"(*Zipf).Uint64", Method, 0, ""}, + {"(Source).Int63", Method, 0, ""}, + {"(Source).Seed", Method, 0, ""}, + {"(Source64).Int63", Method, 8, ""}, + {"(Source64).Seed", Method, 8, ""}, + {"(Source64).Uint64", Method, 8, ""}, {"ExpFloat64", Func, 0, "func() float64"}, {"Float32", Func, 0, "func() float32"}, {"Float64", Func, 0, "func() float64"}, @@ -7888,6 +8251,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Rand).Uint64N", Method, 22, ""}, {"(*Rand).UintN", Method, 22, ""}, {"(*Zipf).Uint64", Method, 22, ""}, + {"(Source).Uint64", Method, 22, ""}, {"ChaCha8", Type, 22, ""}, {"ExpFloat64", Func, 22, "func() float64"}, {"Float32", Func, 22, "func() float32"}, @@ -7951,6 +8315,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).FormDataContentType", Method, 0, ""}, {"(*Writer).SetBoundary", Method, 1, ""}, {"(*Writer).WriteField", Method, 0, ""}, + {"(File).Close", Method, 0, ""}, + {"(File).Read", Method, 0, ""}, + {"(File).ReadAt", Method, 0, ""}, + {"(File).Seek", Method, 0, ""}, {"ErrMessageTooLarge", Var, 9, ""}, {"File", Type, 0, ""}, 
{"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"}, @@ -8135,6 +8503,19 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnixListener).SetDeadline", Method, 0, ""}, {"(*UnixListener).SetUnlinkOnClose", Method, 8, ""}, {"(*UnixListener).SyscallConn", Method, 10, ""}, + {"(Addr).Network", Method, 0, ""}, + {"(Addr).String", Method, 0, ""}, + {"(Conn).Close", Method, 0, ""}, + {"(Conn).LocalAddr", Method, 0, ""}, + {"(Conn).Read", Method, 0, ""}, + {"(Conn).RemoteAddr", Method, 0, ""}, + {"(Conn).SetDeadline", Method, 0, ""}, + {"(Conn).SetReadDeadline", Method, 0, ""}, + {"(Conn).SetWriteDeadline", Method, 0, ""}, + {"(Conn).Write", Method, 0, ""}, + {"(Error).Error", Method, 0, ""}, + {"(Error).Temporary", Method, 0, ""}, + {"(Error).Timeout", Method, 0, ""}, {"(Flags).String", Method, 0, ""}, {"(HardwareAddr).String", Method, 0, ""}, {"(IP).AppendText", Method, 24, ""}, @@ -8158,6 +8539,16 @@ var PackageSymbols = map[string][]Symbol{ {"(InvalidAddrError).Error", Method, 0, ""}, {"(InvalidAddrError).Temporary", Method, 0, ""}, {"(InvalidAddrError).Timeout", Method, 0, ""}, + {"(Listener).Accept", Method, 0, ""}, + {"(Listener).Addr", Method, 0, ""}, + {"(Listener).Close", Method, 0, ""}, + {"(PacketConn).Close", Method, 0, ""}, + {"(PacketConn).LocalAddr", Method, 0, ""}, + {"(PacketConn).ReadFrom", Method, 0, ""}, + {"(PacketConn).SetDeadline", Method, 0, ""}, + {"(PacketConn).SetReadDeadline", Method, 0, ""}, + {"(PacketConn).SetWriteDeadline", Method, 0, ""}, + {"(PacketConn).WriteTo", Method, 0, ""}, {"(UnknownNetworkError).Error", Method, 0, ""}, {"(UnknownNetworkError).Temporary", Method, 0, ""}, {"(UnknownNetworkError).Timeout", Method, 0, ""}, @@ -8333,6 +8724,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Client).Head", Method, 0, ""}, {"(*Client).Post", Method, 0, ""}, {"(*Client).PostForm", Method, 0, ""}, + {"(*ClientConn).Available", Method, 26, ""}, + {"(*ClientConn).Close", Method, 26, ""}, + {"(*ClientConn).Err", Method, 26, ""}, + {"(*ClientConn).InFlight", Method, 26, ""}, + {"(*ClientConn).Release", Method, 26, ""}, + {"(*ClientConn).Reserve", Method, 26, ""}, + {"(*ClientConn).RoundTrip", Method, 26, ""}, + {"(*ClientConn).SetStateHook", Method, 26, ""}, {"(*Cookie).String", Method, 0, ""}, {"(*Cookie).Valid", Method, 18, ""}, {"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""}, @@ -8392,10 +8791,22 @@ var PackageSymbols = map[string][]Symbol{ {"(*Transport).CancelRequest", Method, 1, ""}, {"(*Transport).Clone", Method, 13, ""}, {"(*Transport).CloseIdleConnections", Method, 0, ""}, + {"(*Transport).NewClientConn", Method, 26, ""}, {"(*Transport).RegisterProtocol", Method, 0, ""}, {"(*Transport).RoundTrip", Method, 0, ""}, + {"(CloseNotifier).CloseNotify", Method, 1, ""}, {"(ConnState).String", Method, 3, ""}, + {"(CookieJar).Cookies", Method, 0, ""}, + {"(CookieJar).SetCookies", Method, 0, ""}, {"(Dir).Open", Method, 0, ""}, + {"(File).Close", Method, 0, ""}, + {"(File).Read", Method, 0, ""}, + {"(File).Readdir", Method, 0, ""}, + {"(File).Seek", Method, 0, ""}, + {"(File).Stat", Method, 0, ""}, + {"(FileSystem).Open", Method, 0, ""}, + {"(Flusher).Flush", Method, 0, ""}, + {"(Handler).ServeHTTP", Method, 0, ""}, {"(HandlerFunc).ServeHTTP", Method, 0, ""}, {"(Header).Add", Method, 0, ""}, {"(Header).Clone", Method, 13, ""}, @@ -8405,10 +8816,16 @@ var PackageSymbols = map[string][]Symbol{ {"(Header).Values", Method, 14, ""}, {"(Header).Write", Method, 0, ""}, {"(Header).WriteSubset", Method, 0, ""}, + 
{"(Hijacker).Hijack", Method, 0, ""}, {"(Protocols).HTTP1", Method, 24, ""}, {"(Protocols).HTTP2", Method, 24, ""}, {"(Protocols).String", Method, 24, ""}, {"(Protocols).UnencryptedHTTP2", Method, 24, ""}, + {"(Pusher).Push", Method, 8, ""}, + {"(ResponseWriter).Header", Method, 0, ""}, + {"(ResponseWriter).Write", Method, 0, ""}, + {"(ResponseWriter).WriteHeader", Method, 0, ""}, + {"(RoundTripper).RoundTrip", Method, 0, ""}, {"AllowQuerySemicolons", Func, 17, "func(h Handler) Handler"}, {"CanonicalHeaderKey", Func, 0, "func(s string) string"}, {"Client", Type, 0, ""}, @@ -8416,6 +8833,7 @@ var PackageSymbols = map[string][]Symbol{ {"Client.Jar", Field, 0, ""}, {"Client.Timeout", Field, 3, ""}, {"Client.Transport", Field, 0, ""}, + {"ClientConn", Type, 26, ""}, {"CloseNotifier", Type, 1, ""}, {"ConnState", Type, 3, ""}, {"Cookie", Type, 0, ""}, @@ -8726,6 +9144,8 @@ var PackageSymbols = map[string][]Symbol{ "net/http/cookiejar": { {"(*Jar).Cookies", Method, 1, ""}, {"(*Jar).SetCookies", Method, 1, ""}, + {"(PublicSuffixList).PublicSuffix", Method, 1, ""}, + {"(PublicSuffixList).String", Method, 1, ""}, {"Jar", Type, 1, ""}, {"New", Func, 1, "func(o *Options) (*Jar, error)"}, {"Options", Type, 1, ""}, @@ -8819,6 +9239,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*ServerConn).Pending", Method, 0, ""}, {"(*ServerConn).Read", Method, 0, ""}, {"(*ServerConn).Write", Method, 0, ""}, + {"(BufferPool).Get", Method, 6, ""}, + {"(BufferPool).Put", Method, 6, ""}, {"BufferPool", Type, 6, ""}, {"ClientConn", Type, 0, ""}, {"DumpRequest", Func, 0, "func(req *http.Request, body bool) ([]byte, error)"}, @@ -8972,6 +9394,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Server).ServeConn", Method, 0, ""}, {"(*Server).ServeHTTP", Method, 0, ""}, {"(*Server).ServeRequest", Method, 0, ""}, + {"(ClientCodec).Close", Method, 0, ""}, + {"(ClientCodec).ReadResponseBody", Method, 0, ""}, + {"(ClientCodec).ReadResponseHeader", Method, 0, ""}, + {"(ClientCodec).WriteRequest", Method, 0, ""}, + {"(ServerCodec).Close", Method, 0, ""}, + {"(ServerCodec).ReadRequestBody", Method, 0, ""}, + {"(ServerCodec).ReadRequestHeader", Method, 0, ""}, + {"(ServerCodec).WriteResponse", Method, 0, ""}, {"(ServerError).Error", Method, 0, ""}, {"Accept", Func, 0, "func(lis net.Listener)"}, {"Call", Type, 0, ""}, @@ -9030,6 +9460,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*Client).StartTLS", Method, 0, ""}, {"(*Client).TLSConnectionState", Method, 5, ""}, {"(*Client).Verify", Method, 0, ""}, + {"(Auth).Next", Method, 0, ""}, + {"(Auth).Start", Method, 0, ""}, {"Auth", Type, 0, ""}, {"CRAMMD5Auth", Func, 0, "func(username string, secret string) Auth"}, {"Client", Type, 0, ""}, @@ -9241,10 +9673,18 @@ var PackageSymbols = map[string][]Symbol{ {"(*SyscallError).Error", Method, 0, ""}, {"(*SyscallError).Timeout", Method, 10, ""}, {"(*SyscallError).Unwrap", Method, 13, ""}, + {"(FileInfo).IsDir", Method, 0, ""}, + {"(FileInfo).ModTime", Method, 0, ""}, + {"(FileInfo).Mode", Method, 0, ""}, + {"(FileInfo).Name", Method, 0, ""}, + {"(FileInfo).Size", Method, 0, ""}, + {"(FileInfo).Sys", Method, 0, ""}, {"(FileMode).IsDir", Method, 0, ""}, {"(FileMode).IsRegular", Method, 1, ""}, {"(FileMode).Perm", Method, 0, ""}, {"(FileMode).String", Method, 0, ""}, + {"(Signal).Signal", Method, 0, ""}, + {"(Signal).String", Method, 0, ""}, {"Args", Var, 0, ""}, {"Chdir", Func, 0, "func(dir string) error"}, {"Chmod", Func, 0, "func(name string, mode FileMode) error"}, @@ -9521,6 +9961,45 @@ var PackageSymbols = map[string][]Symbol{ 
{"(StructField).IsExported", Method, 17, ""}, {"(StructTag).Get", Method, 0, ""}, {"(StructTag).Lookup", Method, 7, ""}, + {"(Type).Align", Method, 0, ""}, + {"(Type).AssignableTo", Method, 0, ""}, + {"(Type).Bits", Method, 0, ""}, + {"(Type).CanSeq", Method, 23, ""}, + {"(Type).CanSeq2", Method, 23, ""}, + {"(Type).ChanDir", Method, 0, ""}, + {"(Type).Comparable", Method, 4, ""}, + {"(Type).ConvertibleTo", Method, 1, ""}, + {"(Type).Elem", Method, 0, ""}, + {"(Type).Field", Method, 0, ""}, + {"(Type).FieldAlign", Method, 0, ""}, + {"(Type).FieldByIndex", Method, 0, ""}, + {"(Type).FieldByName", Method, 0, ""}, + {"(Type).FieldByNameFunc", Method, 0, ""}, + {"(Type).Fields", Method, 26, ""}, + {"(Type).Implements", Method, 0, ""}, + {"(Type).In", Method, 0, ""}, + {"(Type).Ins", Method, 26, ""}, + {"(Type).IsVariadic", Method, 0, ""}, + {"(Type).Key", Method, 0, ""}, + {"(Type).Kind", Method, 0, ""}, + {"(Type).Len", Method, 0, ""}, + {"(Type).Method", Method, 0, ""}, + {"(Type).MethodByName", Method, 0, ""}, + {"(Type).Methods", Method, 26, ""}, + {"(Type).Name", Method, 0, ""}, + {"(Type).NumField", Method, 0, ""}, + {"(Type).NumIn", Method, 0, ""}, + {"(Type).NumMethod", Method, 0, ""}, + {"(Type).NumOut", Method, 0, ""}, + {"(Type).Out", Method, 0, ""}, + {"(Type).Outs", Method, 26, ""}, + {"(Type).OverflowComplex", Method, 23, ""}, + {"(Type).OverflowFloat", Method, 23, ""}, + {"(Type).OverflowInt", Method, 23, ""}, + {"(Type).OverflowUint", Method, 23, ""}, + {"(Type).PkgPath", Method, 0, ""}, + {"(Type).Size", Method, 0, ""}, + {"(Type).String", Method, 0, ""}, {"(Value).Addr", Method, 0, ""}, {"(Value).Bool", Method, 0, ""}, {"(Value).Bytes", Method, 0, ""}, @@ -9547,6 +10026,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Value).FieldByIndexErr", Method, 18, ""}, {"(Value).FieldByName", Method, 0, ""}, {"(Value).FieldByNameFunc", Method, 0, ""}, + {"(Value).Fields", Method, 26, ""}, {"(Value).Float", Method, 0, ""}, {"(Value).Grow", Method, 20, ""}, {"(Value).Index", Method, 0, ""}, @@ -9563,6 +10043,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Value).MapRange", Method, 12, ""}, {"(Value).Method", Method, 0, ""}, {"(Value).MethodByName", Method, 0, ""}, + {"(Value).Methods", Method, 26, ""}, {"(Value).NumField", Method, 0, ""}, {"(Value).NumMethod", Method, 0, ""}, {"(Value).OverflowComplex", Method, 0, ""}, @@ -9678,7 +10159,6 @@ var PackageSymbols = map[string][]Symbol{ {"StructOf", Func, 7, "func(fields []StructField) Type"}, {"StructTag", Type, 0, ""}, {"Swapper", Func, 8, "func(slice any) func(i int, j int)"}, - {"Type", Type, 0, ""}, {"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"}, {"TypeFor", Func, 22, "func[T any]() Type"}, {"TypeOf", Func, 0, "func(i any) Type"}, @@ -9880,6 +10360,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*TypeAssertionError).Error", Method, 0, ""}, {"(*TypeAssertionError).RuntimeError", Method, 0, ""}, {"(Cleanup).Stop", Method, 24, ""}, + {"(Error).Error", Method, 0, ""}, + {"(Error).RuntimeError", Method, 0, ""}, {"AddCleanup", Func, 24, "func[T, S any](ptr *T, cleanup func(S), arg S) Cleanup"}, {"BlockProfile", Func, 1, "func(p []BlockProfileRecord) (n int, ok bool)"}, {"BlockProfileRecord", Type, 1, ""}, @@ -10154,6 +10636,9 @@ var PackageSymbols = map[string][]Symbol{ {"(IntSlice).Search", Method, 0, ""}, {"(IntSlice).Sort", Method, 0, ""}, {"(IntSlice).Swap", Method, 0, ""}, + {"(Interface).Len", Method, 0, ""}, + {"(Interface).Less", Method, 0, ""}, + {"(Interface).Swap", Method, 0, ""}, {"(StringSlice).Len", Method, 0, 
""}, {"(StringSlice).Less", Method, 0, ""}, {"(StringSlice).Search", Method, 0, ""}, @@ -10345,6 +10830,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*WaitGroup).Done", Method, 0, ""}, {"(*WaitGroup).Go", Method, 25, ""}, {"(*WaitGroup).Wait", Method, 0, ""}, + {"(Locker).Lock", Method, 0, ""}, + {"(Locker).Unlock", Method, 0, ""}, {"Cond", Type, 0, ""}, {"Cond.L", Field, 0, ""}, {"Locker", Type, 0, ""}, @@ -10486,10 +10973,14 @@ var PackageSymbols = map[string][]Symbol{ {"(*Timeval).Nano", Method, 0, ""}, {"(*Timeval).Nanoseconds", Method, 0, ""}, {"(*Timeval).Unix", Method, 0, ""}, + {"(Conn).SyscallConn", Method, 9, ""}, {"(Errno).Error", Method, 0, ""}, {"(Errno).Is", Method, 13, ""}, {"(Errno).Temporary", Method, 0, ""}, {"(Errno).Timeout", Method, 0, ""}, + {"(RawConn).Control", Method, 9, ""}, + {"(RawConn).Read", Method, 9, ""}, + {"(RawConn).Write", Method, 9, ""}, {"(Signal).Signal", Method, 0, ""}, {"(Signal).String", Method, 0, ""}, {"(Token).Close", Method, 0, ""}, @@ -14409,7 +14900,7 @@ var PackageSymbols = map[string][]Symbol{ {"RouteMessage.Data", Field, 0, ""}, {"RouteMessage.Header", Field, 0, ""}, {"RouteRIB", Func, 0, ""}, - {"RoutingMessage", Type, 0, ""}, + {"RoutingMessage", Type, 14, ""}, {"RtAttr", Type, 0, ""}, {"RtAttr.Len", Field, 0, ""}, {"RtAttr.Type", Field, 0, ""}, @@ -15895,7 +16386,6 @@ var PackageSymbols = map[string][]Symbol{ {"SockFprog.Filter", Field, 0, ""}, {"SockFprog.Len", Field, 0, ""}, {"SockFprog.Pad_cgo_0", Field, 0, ""}, - {"Sockaddr", Type, 0, ""}, {"SockaddrDatalink", Type, 0, ""}, {"SockaddrDatalink.Alen", Field, 0, ""}, {"SockaddrDatalink.Data", Field, 0, ""}, @@ -16801,6 +17291,29 @@ var PackageSymbols = map[string][]Symbol{ {"(BenchmarkResult).MemString", Method, 1, ""}, {"(BenchmarkResult).NsPerOp", Method, 0, ""}, {"(BenchmarkResult).String", Method, 0, ""}, + {"(TB).ArtifactDir", Method, 26, ""}, + {"(TB).Attr", Method, 25, ""}, + {"(TB).Chdir", Method, 24, ""}, + {"(TB).Cleanup", Method, 14, ""}, + {"(TB).Context", Method, 24, ""}, + {"(TB).Error", Method, 2, ""}, + {"(TB).Errorf", Method, 2, ""}, + {"(TB).Fail", Method, 2, ""}, + {"(TB).FailNow", Method, 2, ""}, + {"(TB).Failed", Method, 2, ""}, + {"(TB).Fatal", Method, 2, ""}, + {"(TB).Fatalf", Method, 2, ""}, + {"(TB).Helper", Method, 9, ""}, + {"(TB).Log", Method, 2, ""}, + {"(TB).Logf", Method, 2, ""}, + {"(TB).Name", Method, 8, ""}, + {"(TB).Output", Method, 25, ""}, + {"(TB).Setenv", Method, 17, ""}, + {"(TB).Skip", Method, 2, ""}, + {"(TB).SkipNow", Method, 2, ""}, + {"(TB).Skipf", Method, 2, ""}, + {"(TB).Skipped", Method, 2, ""}, + {"(TB).TempDir", Method, 15, ""}, {"AllocsPerRun", Func, 1, "func(runs int, f func()) (avg float64)"}, {"B", Type, 0, ""}, {"B.N", Field, 0, ""}, @@ -16851,7 +17364,6 @@ var PackageSymbols = map[string][]Symbol{ {"RunTests", Func, 0, "func(matchString func(pat string, str string) (bool, error), tests []InternalTest) (ok bool)"}, {"Short", Func, 0, "func() bool"}, {"T", Type, 0, ""}, - {"TB", Type, 2, ""}, {"Testing", Func, 21, "func() bool"}, {"Verbose", Func, 1, "func() bool"}, }, @@ -16887,6 +17399,7 @@ var PackageSymbols = map[string][]Symbol{ "testing/quick": { {"(*CheckEqualError).Error", Method, 0, ""}, {"(*CheckError).Error", Method, 0, ""}, + {"(Generator).Generate", Method, 0, ""}, {"(SetupError).Error", Method, 0, ""}, {"Check", Func, 0, "func(f any, config *Config) error"}, {"CheckEqual", Func, 0, "func(f any, g any, config *Config) error"}, @@ -17093,6 +17606,10 @@ var PackageSymbols = map[string][]Symbol{ {"(ListNode).Position", 
Method, 1, ""}, {"(ListNode).Type", Method, 0, ""}, {"(NilNode).Position", Method, 1, ""}, + {"(Node).Copy", Method, 0, ""}, + {"(Node).Position", Method, 1, ""}, + {"(Node).String", Method, 0, ""}, + {"(Node).Type", Method, 0, ""}, {"(NodeType).Type", Method, 0, ""}, {"(NumberNode).Position", Method, 1, ""}, {"(NumberNode).Type", Method, 0, ""}, diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go index e223e0f34..59a5de36a 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go +++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -39,7 +39,7 @@ const ( Var // "EOF" Const // "Pi" Field // "Point.X" - Method // "(*Buffer).Grow" + Method // "(*Buffer).Grow" or "(Reader).Read" ) func (kind Kind) String() string { diff --git a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go index 3db2a135b..7ebe9768b 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go @@ -8,7 +8,7 @@ import ( "fmt" "go/ast" "go/types" - _ "unsafe" + _ "unsafe" // for go:linkname hack ) // CallKind describes the function position of an [*ast.CallExpr]. diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index fef74a785..51001666e 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -23,7 +23,6 @@ import ( "go/token" "go/types" "reflect" - "unsafe" "golang.org/x/tools/go/ast/inspector" "golang.org/x/tools/internal/aliases" @@ -40,8 +39,7 @@ func SetUsesCgo(conf *types.Config) bool { } } - addr := unsafe.Pointer(f.UnsafeAddr()) - *(*bool)(addr) = true + *(*bool)(f.Addr().UnsafePointer()) = true return true } diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go index a5f4e3252..cdd36c388 100644 --- a/vendor/golang.org/x/tools/internal/versions/features.go +++ b/vendor/golang.org/x/tools/internal/versions/features.go @@ -9,6 +9,7 @@ package versions // named constants, to avoid misspelling const ( + Go1_17 = "go1.17" Go1_18 = "go1.18" Go1_19 = "go1.19" Go1_20 = "go1.20" diff --git a/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go b/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go new file mode 100644 index 000000000..40ec27d87 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go @@ -0,0 +1,303 @@ +/* +Copyright 2013 Google Inc. All Rights Reserved. +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package verbosity + +import ( + "bytes" + "errors" + "flag" + "fmt" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" +) + +// New returns a struct that implements -v and -vmodule support. 
Changing and +// checking these settings is thread-safe, with all concurrency issues handled +// internally. +func New() *VState { + vs := new(VState) + + // The two fields must have a pointer to the overall struct for their + // implementation of Set. + vs.vmodule.vs = vs + vs.verbosity.vs = vs + + return vs +} + +// Value is an extension that makes it possible to use the values in pflag. +type Value interface { + flag.Value + Type() string +} + +func (vs *VState) V() Value { + return &vs.verbosity +} + +func (vs *VState) VModule() Value { + return &vs.vmodule +} + +// VState contains settings and state. Some of its fields can be accessed +// through atomic read/writes; in other cases a mutex must be held. +type VState struct { + mu sync.Mutex + + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity levelSpec // V logging level, the value of the -v flag. + + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using atomic.LoadInt32, but is only modified under mu. + filterLength int32 +} + +// Level must be an int32 to support atomic read/writes. +type Level int32 + +type levelSpec struct { + vs *VState + l Level +} + +// get returns the value of the level. +func (l *levelSpec) get() Level { + return Level(atomic.LoadInt32((*int32)(&l.l))) +} + +// set sets the value of the level. +func (l *levelSpec) set(val Level) { + atomic.StoreInt32((*int32)(&l.l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *levelSpec) String() string { + return strconv.FormatInt(int64(l.l), 10) +} + +// Get is part of the flag.Getter interface. It returns the +// verbosity level as int32. +func (l *levelSpec) Get() interface{} { + return int32(l.l) +} + +// Type is part of pflag.Value. +func (l *levelSpec) Type() string { + return "Level" +} + +// Set is part of the flag.Value interface. +func (l *levelSpec) Set(value string) error { + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + l.vs.mu.Lock() + defer l.vs.mu.Unlock() + l.vs.set(Level(v), l.vs.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + vs *VState + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + // Empty instances don't have and don't need a lock (can + // happen when flag uses introspection).
+ if m.vs != nil { + m.vs.mu.Lock() + defer m.vs.mu.Unlock() + } + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +// Type is part of pflag.Value. +func (m *moduleSpec) Type() string { + return "pattern=N,..." +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Set sets the module value. +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.ParseInt(patLev[1], 10, 32) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + m.vs.mu.Lock() + defer m.vs.mu.Unlock() + m.vs.set(m.vs.verbosity.l, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// set sets a consistent state for V logging. +// The mutex must be held. +func (vs *VState) set(l Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + vs.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&vs.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + vs.vmodule.filter = filter + vs.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&vs.filterLength, int32(len(filter))) + vs.verbosity.set(l) +} + +// Enabled checks whether logging is enabled at the given level. This must be +// called with depth=0 when the caller of Enabled will do the logging and +// higher values when more stack levels need to be skipped. +// +// The mutex will be locked only if needed. +func (vs *VState) Enabled(level Level, depth int) bool { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if vs.verbosity.get() >= level { + return true + } + + // It's off globally but vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&vs.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway.
+ vs.mu.Lock() + defer vs.mu.Unlock() + if runtime.Callers(depth+2, vs.pcs[:]) == 0 { + return false + } + // runtime.Callers returns "return PCs", but we want + // to look up the symbolic information for the call, + // so subtract 1 from the PC. runtime.CallersFrames + // would be cleaner, but allocates. + pc := vs.pcs[0] - 1 + v, ok := vs.vmap[pc] + if !ok { + v = vs.setV(pc) + } + return v >= level + } + return false +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// Mutex is held. +func (vs *VState) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + file = strings.TrimSuffix(file, ".go") + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range vs.vmodule.filter { + if filter.match(file) { + vs.vmap[pc] = filter.level + return filter.level + } + } + vs.vmap[pc] = 0 + return 0 +} diff --git a/vendor/k8s.io/klog/v2/textlogger/options.go b/vendor/k8s.io/klog/v2/textlogger/options.go new file mode 100644 index 000000000..b1c4eefb3 --- /dev/null +++ b/vendor/k8s.io/klog/v2/textlogger/options.go @@ -0,0 +1,154 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package textlogger + +import ( + "flag" + "io" + "os" + "strconv" + "time" + + "k8s.io/klog/v2/internal/verbosity" +) + +// Config influences logging in a text logger. To make this configurable via +// command line flags, instantiate this once per program and use AddFlags to +// bind command line flags to the instance before passing it to NewLogger. +// +// Must be constructed with NewConfig. +type Config struct { + vstate *verbosity.VState + co configOptions +} + +// Verbosity returns a value instance that can be used to query (via String) or +// modify (via Set) the verbosity threshold. This is thread-safe and can be +// done at runtime. +func (c *Config) Verbosity() flag.Value { + return c.vstate.V() +} + +// VModule returns a value instance that can be used to query (via String) or +// modify (via Set) the vmodule settings. This is thread-safe and can be done +// at runtime. +func (c *Config) VModule() flag.Value { + return c.vstate.VModule() +} + +// ConfigOption implements functional parameters for NewConfig. +type ConfigOption func(co *configOptions) + +type configOptions struct { + verbosityFlagName string + vmoduleFlagName string + verbosityDefault int + fixedTime *time.Time + unwind func(int) (string, int) + output io.Writer +} + +// VerbosityFlagName overrides the default -v for the verbosity level. +func VerbosityFlagName(name string) ConfigOption { + return func(co *configOptions) { + co.verbosityFlagName = name + } +} + +// VModuleFlagName overrides the default -vmodule for the per-module +// verbosity levels.
+func VModuleFlagName(name string) ConfigOption { + return func(co *configOptions) { + co.vmoduleFlagName = name + } +} + +// Verbosity overrides the default verbosity level of 0. +// See https://github.com/kubernetes/community/blob/9406b4352fe2d5810cb21cc3cb059ce5886de157/contributors/devel/sig-instrumentation/logging.md#logging-conventions +// for log level conventions in Kubernetes. +func Verbosity(level int) ConfigOption { + return func(co *configOptions) { + co.verbosityDefault = level + } +} + +// Output overrides stderr as the output stream. +func Output(output io.Writer) ConfigOption { + return func(co *configOptions) { + co.output = output + } +} + +// FixedTime overrides the actual time with a fixed time. Useful only for testing. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func FixedTime(ts time.Time) ConfigOption { + return func(co *configOptions) { + co.fixedTime = &ts + } +} + +// Backtrace overrides the default mechanism for determining the call site. +// The callback is invoked with the number of function calls between itself +// and the call site. It must return the file name and line number. An empty +// file name indicates that the information is unknown. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func Backtrace(unwind func(skip int) (filename string, line int)) ConfigOption { + return func(co *configOptions) { + co.unwind = unwind + } +} + +// NewConfig returns a configuration with recommended defaults and optional +// modifications. Command line flags are not bound to any FlagSet yet. +func NewConfig(opts ...ConfigOption) *Config { + c := &Config{ + vstate: verbosity.New(), + co: configOptions{ + verbosityFlagName: "v", + vmoduleFlagName: "vmodule", + verbosityDefault: 0, + unwind: runtimeBacktrace, + output: os.Stderr, + }, + } + for _, opt := range opts { + opt(&c.co) + } + + // Cannot fail for this input. + _ = c.Verbosity().Set(strconv.FormatInt(int64(c.co.verbosityDefault), 10)) + return c +} + +// AddFlags registers the command line flags that control the configuration. +// +// The default flag names are the same as in klog, so unless those defaults +// are changed, either klog.InitFlags or Config.AddFlags can be used for the +// same flag set, but not both. +func (c *Config) AddFlags(fs *flag.FlagSet) { + fs.Var(c.Verbosity(), c.co.verbosityFlagName, "number for the log level verbosity of the testing logger") + fs.Var(c.VModule(), c.co.vmoduleFlagName, "comma-separated list of pattern=N log level settings for files matching the patterns") +} diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger.go b/vendor/k8s.io/klog/v2/textlogger/textlogger.go new file mode 100644 index 000000000..0b55a2994 --- /dev/null +++ b/vendor/k8s.io/klog/v2/textlogger/textlogger.go @@ -0,0 +1,187 @@ +/* +Copyright 2019 The Kubernetes Authors. +Copyright 2020 Intel Corporation. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package textlogger contains an implementation of the logr interface which +// produces exactly the same output as klog. It does not route output through +// klog (i.e. ignores [k8s.io/klog/v2.InitFlags]). Instead, all settings must be +// configured through its own [NewConfig] and [Config.AddFlags]. +package textlogger + +import ( + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/verbosity" +) + +var ( + // TimeNow is used to retrieve the current time. May be changed for testing. + TimeNow = time.Now +) + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" +) + +// NewLogger constructs a new logger. +// +// Verbosity can be modified at any time through the Config.Verbosity and +// Config.VModule API. +func NewLogger(c *Config) logr.Logger { + return logr.New(&tlogger{ + values: nil, + config: c, + }) +} + +type tlogger struct { + callDepth int + + // hasPrefix is true if the first entry in values is the special + // nameKey key/value. Such an entry gets added and later updated in + // WithName. + hasPrefix bool + + values []interface{} + groups string + config *Config +} + +func (l *tlogger) Init(info logr.RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *tlogger) WithCallDepth(depth int) logr.LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *tlogger) Enabled(level int) bool { + return l.config.vstate.Enabled(verbosity.Level(level), 1+l.callDepth) +} + +func (l *tlogger) Info(_ int, msg string, kvList ...interface{}) { + l.print(nil, severity.InfoLog, msg, kvList) +} + +func (l *tlogger) Error(err error, msg string, kvList ...interface{}) { + l.print(err, severity.ErrorLog, msg, kvList) +} + +func (l *tlogger) print(err error, s severity.Severity, msg string, kvList []interface{}) { + // Determine caller. + // +1 for this frame, +1 for Info/Error. + skip := l.callDepth + 2 + file, line := l.config.co.unwind(skip) + if file == "" { + file = "???" + line = 1 + } else if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + l.printWithInfos(file, line, TimeNow(), err, s, msg, kvList) +} + +func runtimeBacktrace(skip int) (string, int) { + _, file, line, ok := runtime.Caller(skip + 1) + if !ok { + return "", 0 + } + return file, line +} + +func (l *tlogger) printWithInfos(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // Only create a new buffer if we don't have one cached. + b := buffer.GetBuffer() + defer buffer.PutBuffer(b) + + // Format header. + if l.config.co.fixedTime != nil { + now = *l.config.co.fixedTime + } + b.FormatHeader(s, file, line, now) + + // The message is always quoted, even if it contains line breaks. + // If developers want multi-line output, they should use a small, fixed + // message and put the multi-line output into a value. + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVFormat(&b.Buffer, "err", err) + } + serialize.MergeAndFormatKVs(&b.Buffer, l.values, kvList) + if b.Len() == 0 || b.Bytes()[b.Len()-1] != '\n' { + b.WriteByte('\n') + } + _, _ = l.config.co.output.Write(b.Bytes()) +} + +func (l *tlogger) WriteKlogBuffer(data []byte) { + _, _ = l.config.co.output.Write(data) +} + +// WithName returns a new logr.Logger with the specified name appended.
klogr +// uses '/' characters to separate name elements. Callers should not pass '/' +// in the provided name string, but this library does not actually enforce that. +func (l *tlogger) WithName(name string) logr.LogSink { + clone := *l + if l.hasPrefix { + // Copy slice and modify value. No length checks and type + // assertions are needed because hasPrefix is only true if the + // first two elements exist and are key/value strings. + v := make([]interface{}, 0, len(l.values)) + v = append(v, l.values...) + prefix, _ := v[1].(string) + v[1] = prefix + "." + name + clone.values = v + } else { + // Prepend new key/value pair. + v := make([]interface{}, 0, 2+len(l.values)) + v = append(v, nameKey, name) + v = append(v, l.values...) + clone.values = v + clone.hasPrefix = true + } + return &clone +} + +func (l *tlogger) WithValues(kvList ...interface{}) logr.LogSink { + clone := *l + clone.values = serialize.WithValues(l.values, kvList) + return &clone +} + +// KlogBufferWriter is implemented by the textlogger LogSink. +type KlogBufferWriter interface { + // WriteKlogBuffer takes a pre-formatted buffer prepared by klog and + // writes it unchanged to the output stream. Can be used with + // klog.WriteKlogBuffer when setting a logger through + // klog.SetLoggerWithOptions. + WriteKlogBuffer([]byte) +} + +var _ logr.LogSink = &tlogger{} +var _ logr.CallDepthLogSink = &tlogger{} +var _ KlogBufferWriter = &tlogger{} diff --git a/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go b/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go new file mode 100644 index 000000000..c888ef8a6 --- /dev/null +++ b/vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go @@ -0,0 +1,52 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package textlogger + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" + + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/sloghandler" +) + +func (l *tlogger) Handle(ctx context.Context, record slog.Record) error { + return sloghandler.Handle(ctx, record, l.groups, l.printWithInfos) +} + +func (l *tlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink { + clone := *l + clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) + return &clone +} + +func (l *tlogger) WithGroup(name string) logr.SlogSink { + clone := *l + if clone.groups != "" { + clone.groups += "." + name + } else { + clone.groups = name + } + return &clone +} + +var _ logr.SlogSink = &tlogger{} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/util.go b/vendor/k8s.io/kube-openapi/pkg/util/util.go index 6eee935b2..830ec3ca0 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/util.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/util.go @@ -92,10 +92,21 @@ type OpenAPICanonicalTypeNamer interface { OpenAPICanonicalTypeName() string } +// OpenAPIModelNamer is an interface Go types may implement to provide an OpenAPI model name.
+// +// This takes precedence over OpenAPICanonicalTypeNamer, and should be used when a Go type has a model +// name that differs from its canonical type name as determined by Go package name reflection. +type OpenAPIModelNamer interface { + OpenAPIModelName() string +} + // GetCanonicalTypeName will find the canonical type name of a sample object, removing // the "vendor" part of the path func GetCanonicalTypeName(model interface{}) string { - if namer, ok := model.(OpenAPICanonicalTypeNamer); ok { + switch namer := model.(type) { + case OpenAPIModelNamer: + return namer.OpenAPIModelName() + case OpenAPICanonicalTypeNamer: return namer.OpenAPICanonicalTypeName() } t := reflect.TypeOf(model) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go index 97b2f989e..23109816e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go @@ -17,7 +17,6 @@ package strfmt import ( "encoding/base64" "encoding/json" - "fmt" "net/mail" "regexp" "strings" @@ -247,29 +246,6 @@ func (b *Base64) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver -func (b *Base64) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - dbuf := make([]byte, base64.StdEncoding.DecodedLen(len(v))) - n, err := base64.StdEncoding.Decode(dbuf, v) - if err != nil { - return err - } - *b = dbuf[:n] - case string: - vv, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return err - } - *b = Base64(vv) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Base64 from: %#v", v) - } - - return nil -} - func (b Base64) String() string { return base64.StdEncoding.EncodeToString([]byte(b)) } @@ -324,20 +300,6 @@ func (u *URI) UnmarshalText(data []byte) error { // validation is performed late return nil } -// Scan read a value from a database driver -func (u *URI) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = URI(string(v)) - case string: - *u = URI(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v) - } - - return nil -} - func (u URI) String() string { return string(u) } @@ -388,20 +350,6 @@ func (e *Email) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (e *Email) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *e = Email(string(v)) - case string: - *e = Email(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Email from: %#v", v) - } - - return nil -} - func (e Email) String() string { return string(e) } @@ -452,20 +400,6 @@ func (h *Hostname) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (h *Hostname) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *h = Hostname(string(v)) - case string: - *h = Hostname(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Hostname from: %#v", v) - } - - return nil -} - func (h Hostname) String() string { return string(h) } @@ -516,20 +450,6 @@ func (u *IPv4) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *IPv4) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = IPv4(string(v)) - case string: - *u = IPv4(v) - default: - return fmt.Errorf("cannot 
sql.Scan() strfmt.IPv4 from: %#v", v) - } - - return nil -} - func (u IPv4) String() string { return string(u) } @@ -580,20 +500,6 @@ func (u *IPv6) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *IPv6) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = IPv6(string(v)) - case string: - *u = IPv6(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.IPv6 from: %#v", v) - } - - return nil -} - func (u IPv6) String() string { return string(u) } @@ -644,20 +550,6 @@ func (u *CIDR) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *CIDR) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = CIDR(string(v)) - case string: - *u = CIDR(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.CIDR from: %#v", v) - } - - return nil -} - func (u CIDR) String() string { return string(u) } @@ -708,20 +600,6 @@ func (u *MAC) UnmarshalText(data []byte) error { // validation is performed late return nil } -// Scan read a value from a database driver -func (u *MAC) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = MAC(string(v)) - case string: - *u = MAC(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.IPv4 from: %#v", v) - } - - return nil -} - func (u MAC) String() string { return string(u) } @@ -772,20 +650,6 @@ func (u *UUID) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *UUID) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID(string(v)) - case string: - *u = UUID(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID from: %#v", v) - } - - return nil -} - func (u UUID) String() string { return string(u) } @@ -839,20 +703,6 @@ func (u *UUID3) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (u *UUID3) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID3(string(v)) - case string: - *u = UUID3(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID3 from: %#v", v) - } - - return nil -} - func (u UUID3) String() string { return string(u) } @@ -906,20 +756,6 @@ func (u *UUID4) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (u *UUID4) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID4(string(v)) - case string: - *u = UUID4(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID4 from: %#v", v) - } - - return nil -} - func (u UUID4) String() string { return string(u) } @@ -973,20 +809,6 @@ func (u *UUID5) UnmarshalText(data []byte) error { // validation is performed la return nil } -// Scan read a value from a database driver -func (u *UUID5) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = UUID5(string(v)) - case string: - *u = UUID5(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.UUID5 from: %#v", v) - } - - return nil -} - func (u UUID5) String() string { return string(u) } @@ -1040,20 +862,6 @@ func (u *ISBN) UnmarshalText(data []byte) error { // validation is performed lat return nil } -// Scan read a value from a database driver -func (u *ISBN) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = 
ISBN(string(v)) - case string: - *u = ISBN(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.ISBN from: %#v", v) - } - - return nil -} - func (u ISBN) String() string { return string(u) } @@ -1107,20 +915,6 @@ func (u *ISBN10) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver -func (u *ISBN10) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = ISBN10(string(v)) - case string: - *u = ISBN10(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.ISBN10 from: %#v", v) - } - - return nil -} - func (u ISBN10) String() string { return string(u) } @@ -1174,20 +968,6 @@ func (u *ISBN13) UnmarshalText(data []byte) error { // validation is performed l return nil } -// Scan read a value from a database driver -func (u *ISBN13) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = ISBN13(string(v)) - case string: - *u = ISBN13(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.ISBN13 from: %#v", v) - } - - return nil -} - func (u ISBN13) String() string { return string(u) } @@ -1241,20 +1021,6 @@ func (u *CreditCard) UnmarshalText(data []byte) error { // validation is perform return nil } -// Scan read a value from a database driver -func (u *CreditCard) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = CreditCard(string(v)) - case string: - *u = CreditCard(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.CreditCard from: %#v", v) - } - - return nil -} - func (u CreditCard) String() string { return string(u) } @@ -1308,20 +1074,6 @@ func (u *SSN) UnmarshalText(data []byte) error { // validation is performed late return nil } -// Scan read a value from a database driver -func (u *SSN) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *u = SSN(string(v)) - case string: - *u = SSN(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.SSN from: %#v", v) - } - - return nil -} - func (u SSN) String() string { return string(u) } @@ -1375,20 +1127,6 @@ func (h *HexColor) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (h *HexColor) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *h = HexColor(string(v)) - case string: - *h = HexColor(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.HexColor from: %#v", v) - } - - return nil -} - func (h HexColor) String() string { return string(h) } @@ -1442,20 +1180,6 @@ func (r *RGBColor) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (r *RGBColor) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *r = RGBColor(string(v)) - case string: - *r = RGBColor(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.RGBColor from: %#v", v) - } - - return nil -} - func (r RGBColor) String() string { return string(r) } @@ -1510,20 +1234,6 @@ func (r *Password) UnmarshalText(data []byte) error { // validation is performed return nil } -// Scan read a value from a database driver -func (r *Password) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - *r = Password(string(v)) - case string: - *r = Password(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Password from: %#v", v) - } - - return nil -} - func (r Password) String() string { return string(r) } diff --git 
a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go index 8fbeb635f..04545296b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go @@ -119,23 +119,6 @@ func ParseDuration(cand string) (time.Duration, error) { return 0, fmt.Errorf("unable to parse %s as duration", cand) } -// Scan reads a Duration value from database driver type. -func (d *Duration) Scan(raw interface{}) error { - switch v := raw.(type) { - // TODO: case []byte: // ? - case int64: - *d = Duration(v) - case float64: - *d = Duration(int64(v)) - case nil: - *d = Duration(0) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Duration from: %#v", v) - } - - return nil -} - // String converts this duration to a string func (d Duration) String() string { return time.Duration(d).String() diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go index b2324db05..d0fd31a9d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go @@ -16,7 +16,6 @@ package strfmt import ( "encoding/json" - "fmt" "regexp" "strings" "time" @@ -114,25 +113,6 @@ func (t *DateTime) UnmarshalText(text []byte) error { return nil } -// Scan scans a DateTime value from database driver type. -func (t *DateTime) Scan(raw interface{}) error { - // TODO: case int64: and case float64: ? - switch v := raw.(type) { - case []byte: - return t.UnmarshalText(v) - case string: - return t.UnmarshalText([]byte(v)) - case time.Time: - *t = DateTime(v) - case nil: - *t = DateTime{} - default: - return fmt.Errorf("cannot sql.Scan() strfmt.DateTime from: %#v", v) - } - - return nil -} - // MarshalJSON returns the DateTime as JSON func (t DateTime) MarshalJSON() ([]byte, error) { return json.Marshal(time.Time(t).Format(MarshalFormat)) diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go index 7cb7795be..e5d508055 100644 --- a/vendor/k8s.io/utils/net/multi_listen.go +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -21,6 +21,7 @@ import ( "fmt" "net" "sync" + "sync/atomic" ) // connErrPair pairs conn and error which is returned by accept on sub-listeners. @@ -38,6 +39,7 @@ type multiListener struct { connCh chan connErrPair // stopCh communicates from parent to child listeners. stopCh chan struct{} + closed atomic.Bool } // compile time check to ensure *multiListener implements net.Listener @@ -150,10 +152,8 @@ func (ml *multiListener) Accept() (net.Conn, error) { // the go-routines to exit. func (ml *multiListener) Close() error { // Make sure this can be called repeatedly without explosions. - select { - case <-ml.stopCh: + if !ml.closed.CompareAndSwap(false, true) { return fmt.Errorf("use of closed network connection") - default: } // Tell all sub-listeners to stop. 
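The multi_listen.go hunk above replaces a select-on-stopCh idempotency check with an atomic.Bool CompareAndSwap: exactly one caller wins the swap and gets to close stopCh, while every later Close returns the "use of closed network connection" error instead of racing on a second close. A minimal standalone sketch of the same pattern, assuming a hypothetical closer type that is not part of the vendored code:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// closer mimics the multiListener shutdown logic: closed flips exactly once,
// and stopCh tells worker goroutines to exit.
type closer struct {
	closed atomic.Bool
	stopCh chan struct{}
}

func newCloser() *closer {
	return &closer{stopCh: make(chan struct{})}
}

// Close may be called repeatedly: only the caller that wins the
// CompareAndSwap closes stopCh, so the channel is never closed twice.
func (c *closer) Close() error {
	if !c.closed.CompareAndSwap(false, true) {
		return errors.New("use of closed network connection")
	}
	close(c.stopCh) // signal sub-listeners exactly once
	return nil
}

func main() {
	c := newCloser()
	fmt.Println(c.Close()) // <nil>
	fmt.Println(c.Close()) // use of closed network connection
}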
diff --git a/vendor/modules.txt b/vendor/modules.txt index ed132d3cc..da954fff5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,5 +1,5 @@ -# cel.dev/expr v0.24.0 -## explicit; go 1.22.0 +# cel.dev/expr v0.25.1 +## explicit; go 1.23.0 cel.dev/expr # dario.cat/mergo v1.0.2 ## explicit; go 1.13 @@ -11,7 +11,7 @@ github.com/AdaLogics/go-fuzz-headers ## explicit; go 1.16 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm -# github.com/BurntSushi/toml v1.5.0 +# github.com/BurntSushi/toml v1.6.0 ## explicit; go 1.18 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal @@ -86,11 +86,11 @@ github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 -# github.com/cenkalti/backoff/v5 v5.0.2 +# github.com/cenkalti/backoff/v5 v5.0.3 ## explicit; go 1.23 github.com/cenkalti/backoff/v5 -# github.com/cert-manager/cert-manager v1.18.2 -## explicit; go 1.23.0 +# github.com/cert-manager/cert-manager v1.19.2 +## explicit; go 1.25.0 github.com/cert-manager/cert-manager/pkg/apis/acme github.com/cert-manager/cert-manager/pkg/apis/acme/v1 github.com/cert-manager/cert-manager/pkg/apis/certmanager @@ -109,8 +109,8 @@ github.com/chai2010/gettext-go/po # github.com/containerd/cgroups/v3 v3.0.5 ## explicit; go 1.22.0 github.com/containerd/cgroups/v3/cgroup1/stats -# github.com/containerd/containerd v1.7.29 -## explicit; go 1.23.0 +# github.com/containerd/containerd v1.7.30 +## explicit; go 1.24.0 github.com/containerd/containerd/archive github.com/containerd/containerd/archive/compression github.com/containerd/containerd/archive/tarheader @@ -200,10 +200,29 @@ github.com/containers/ocicrypt/keywrap/pkcs7 github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider +# github.com/cucumber/gherkin/go/v26 v26.2.0 +## explicit; go 1.19 +github.com/cucumber/gherkin/go/v26 +# github.com/cucumber/godog v0.15.1 +## explicit; go 1.16 +github.com/cucumber/godog +github.com/cucumber/godog/colors +github.com/cucumber/godog/formatters +github.com/cucumber/godog/internal/builder +github.com/cucumber/godog/internal/flags +github.com/cucumber/godog/internal/formatters +github.com/cucumber/godog/internal/models +github.com/cucumber/godog/internal/parser +github.com/cucumber/godog/internal/storage +github.com/cucumber/godog/internal/tags +github.com/cucumber/godog/internal/utils +# github.com/cucumber/messages/go/v21 v21.0.1 +## explicit; go 1.19 +github.com/cucumber/messages/go/v21 # github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 ## explicit github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer -# github.com/cyphar/filepath-securejoin v0.6.0 +# github.com/cyphar/filepath-securejoin v0.6.1 ## explicit; go 1.18 github.com/cyphar/filepath-securejoin github.com/cyphar/filepath-securejoin/internal/consts @@ -298,48 +317,51 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/go-openapi/jsonpointer v0.22.0 -## explicit; go 1.20 +# github.com/go-openapi/jsonpointer v0.22.3 +## explicit; go 1.24.0 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.21.1 -## explicit; go 1.20 +# github.com/go-openapi/jsonreference v0.21.3 +## explicit; go 1.24.0 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/swag v0.24.1 -## explicit; go 1.20.0 +# github.com/go-openapi/swag v0.25.4 +## explicit; 
go 1.24.0 github.com/go-openapi/swag -# github.com/go-openapi/swag/cmdutils v0.24.0 -## explicit; go 1.20.0 +# github.com/go-openapi/swag/cmdutils v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/cmdutils -# github.com/go-openapi/swag/conv v0.24.0 -## explicit; go 1.20.0 +# github.com/go-openapi/swag/conv v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/conv -# github.com/go-openapi/swag/fileutils v0.24.0 -## explicit; go 1.20.0 +# github.com/go-openapi/swag/fileutils v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/fileutils -# github.com/go-openapi/swag/jsonname v0.24.0 -## explicit; go 1.20.0 +# github.com/go-openapi/swag/jsonname v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/jsonname -# github.com/go-openapi/swag/jsonutils v0.24.0 -## explicit; go 1.20 +# github.com/go-openapi/swag/jsonutils v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/jsonutils -# github.com/go-openapi/swag/loading v0.24.0 -## explicit; go 1.20.0 +github.com/go-openapi/swag/jsonutils/adapters +github.com/go-openapi/swag/jsonutils/adapters/ifaces +github.com/go-openapi/swag/jsonutils/adapters/stdlib/json +# github.com/go-openapi/swag/loading v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/loading -# github.com/go-openapi/swag/mangling v0.24.0 -## explicit; go 1.20.0 +# github.com/go-openapi/swag/mangling v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/mangling -# github.com/go-openapi/swag/netutils v0.24.0 -## explicit; go 1.20.0 +# github.com/go-openapi/swag/netutils v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/netutils -# github.com/go-openapi/swag/stringutils v0.24.0 -## explicit; go 1.20.0 +# github.com/go-openapi/swag/stringutils v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/stringutils -# github.com/go-openapi/swag/typeutils v0.24.0 -## explicit; go 1.20.0 +# github.com/go-openapi/swag/typeutils v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/typeutils -# github.com/go-openapi/swag/yamlutils v0.24.0 -## explicit; go 1.20 +# github.com/go-openapi/swag/yamlutils v0.25.4 +## explicit; go 1.24.0 github.com/go-openapi/swag/yamlutils # github.com/gobuffalo/flect v1.0.3 ## explicit; go 1.16 @@ -354,6 +376,9 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings +# github.com/gofrs/uuid v4.3.1+incompatible +## explicit +github.com/gofrs/uuid # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/proto @@ -395,7 +420,7 @@ github.com/google/cel-go/interpreter github.com/google/cel-go/interpreter/functions github.com/google/cel-go/parser github.com/google/cel-go/parser/gen -# github.com/google/gnostic-models v0.7.0 +# github.com/google/gnostic-models v0.7.1 ## explicit; go 1.22 github.com/google/gnostic-models/compiler github.com/google/gnostic-models/extensions @@ -467,7 +492,7 @@ github.com/gosuri/uitable/util/wordwrap # github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 ## explicit github.com/gregjones/httpcache -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 ## explicit; go 1.23.0 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime @@ -484,9 +509,18 @@ github.com/h2non/go-is-svg # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap +# github.com/hashicorp/go-immutable-radix v1.3.1 +## explicit +github.com/hashicorp/go-immutable-radix +# 
github.com/hashicorp/go-memdb v1.3.4 +## explicit; go 1.13 +github.com/hashicorp/go-memdb # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror +# github.com/hashicorp/golang-lru v0.5.4 +## explicit; go 1.12 +github.com/hashicorp/golang-lru/simplelru # github.com/huandu/xstrings v1.5.0 ## explicit; go 1.12 github.com/huandu/xstrings @@ -503,13 +537,10 @@ github.com/jmoiron/sqlx/reflectx # github.com/joelanford/ignore v0.1.1 ## explicit; go 1.21 github.com/joelanford/ignore -# github.com/josharian/intern v1.0.0 -## explicit; go 1.5 -github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.18.1 +# github.com/klauspost/compress v1.18.2 ## explicit; go 1.23 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -549,12 +580,6 @@ github.com/lib/pq/scram # github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de ## explicit github.com/liggitt/tabwriter -# github.com/mailru/easyjson v0.9.0 -## explicit; go 1.20 -github.com/mailru/easyjson -github.com/mailru/easyjson/buffer -github.com/mailru/easyjson/jlexer -github.com/mailru/easyjson/jwriter # github.com/mattn/go-colorable v0.1.13 ## explicit; go 1.15 github.com/mattn/go-colorable @@ -623,7 +648,7 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate -# github.com/onsi/gomega v1.38.2 +# github.com/onsi/gomega v1.38.3 ## explicit; go 1.23.0 github.com/onsi/gomega/gstruct/errors github.com/onsi/gomega/types @@ -637,10 +662,11 @@ github.com/opencontainers/image-spec/specs-go/v1 # github.com/opencontainers/runtime-spec v1.2.1 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/operator-framework/api v0.36.0 +# github.com/operator-framework/api v0.37.0 ## explicit; go 1.24.6 github.com/operator-framework/api/pkg/constraints github.com/operator-framework/api/pkg/encoding +github.com/operator-framework/api/pkg/lib/release github.com/operator-framework/api/pkg/lib/version github.com/operator-framework/api/pkg/manifests github.com/operator-framework/api/pkg/operators @@ -714,8 +740,8 @@ github.com/prometheus/client_model/go ## explicit; go 1.24.0 github.com/prometheus/common/expfmt github.com/prometheus/common/model -# github.com/prometheus/procfs v0.17.0 -## explicit; go 1.23.0 +# github.com/prometheus/procfs v0.19.2 +## explicit; go 1.24.0 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util @@ -761,7 +787,7 @@ github.com/smallstep/pkcs7/internal/legacy/x509 # github.com/spf13/cast v1.7.1 ## explicit; go 1.19 github.com/spf13/cast -# github.com/spf13/cobra v1.10.1 +# github.com/spf13/cobra v1.10.2 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/pflag v1.0.10 @@ -773,7 +799,7 @@ github.com/stefanberger/go-pkcs11uri # github.com/stoewer/go-strcase v1.3.1 ## explicit; go 1.11 github.com/stoewer/go-strcase -# github.com/stretchr/objx v0.5.2 +# github.com/stretchr/objx v0.5.3 ## explicit; go 1.20 github.com/stretchr/objx # github.com/stretchr/testify v1.11.1 @@ -847,11 +873,11 @@ go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 go.opentelemetry.io/otel/semconv/v1.34.0 go.opentelemetry.io/otel/semconv/v1.37.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 ## explicit; go 1.23.0 
go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.36.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 ## explicit; go 1.23.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal @@ -971,7 +997,7 @@ go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 -# golang.org/x/crypto v0.45.0 +# golang.org/x/crypto v0.46.0 ## explicit; go 1.24.0 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish @@ -993,17 +1019,17 @@ golang.org/x/crypto/pbkdf2 golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 -# golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b +# golang.org/x/exp v0.0.0-20250718183923-645b1fa84792 ## explicit; go 1.23.0 golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.30.0 +# golang.org/x/mod v0.31.0 ## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.47.0 +# golang.org/x/net v0.48.0 ## explicit; go 1.24.0 golang.org/x/net/context golang.org/x/net/html @@ -1022,22 +1048,22 @@ golang.org/x/net/websocket ## explicit; go 1.24.0 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sync v0.18.0 +# golang.org/x/sync v0.19.0 ## explicit; go 1.24.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.38.0 +# golang.org/x/sys v0.39.0 ## explicit; go 1.24.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.37.0 +# golang.org/x/term v0.38.0 ## explicit; go 1.24.0 golang.org/x/term -# golang.org/x/text v0.31.0 +# golang.org/x/text v0.32.0 ## explicit; go 1.24.0 golang.org/x/text/cases golang.org/x/text/encoding @@ -1062,10 +1088,10 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.13.0 +# golang.org/x/time v0.14.0 ## explicit; go 1.24.0 golang.org/x/time/rate -# golang.org/x/tools v0.39.0 +# golang.org/x/tools v0.40.0 ## explicit; go 1.24.0 golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/analysistest @@ -1240,7 +1266,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# helm.sh/helm/v3 v3.19.3 +# helm.sh/helm/v3 v3.19.4 ## explicit; go 1.24.0 helm.sh/helm/v3/internal/fileutil helm.sh/helm/v3/internal/resolver @@ -1279,7 +1305,7 @@ helm.sh/helm/v3/pkg/storage/driver helm.sh/helm/v3/pkg/time helm.sh/helm/v3/pkg/time/ctime helm.sh/helm/v3/pkg/uploader -# k8s.io/api v0.34.1 => k8s.io/api v0.34.0 +# k8s.io/api v0.35.0 => k8s.io/api v0.34.0 ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1341,7 +1367,7 @@ k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 k8s.io/api/storagemigration/v1alpha1 -# k8s.io/apiextensions-apiserver v0.34.1 => k8s.io/apiextensions-apiserver v0.34.0 +# k8s.io/apiextensions-apiserver v0.35.0 => k8s.io/apiextensions-apiserver v0.34.0 ## explicit; go 1.24.0 k8s.io/apiextensions-apiserver/pkg/apihelpers k8s.io/apiextensions-apiserver/pkg/apis/apiextensions @@ -1363,7 +1389,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme 
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.34.1 => k8s.io/apimachinery v0.34.0 +# k8s.io/apimachinery v0.35.0 => k8s.io/apimachinery v0.34.0 ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1437,7 +1463,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.34.1 => k8s.io/apiserver v0.34.0 +# k8s.io/apiserver v0.35.0 => k8s.io/apiserver v0.34.0 ## explicit; go 1.24.0 k8s.io/apiserver/pkg/apis/apiserver k8s.io/apiserver/pkg/apis/apiserver/install @@ -1486,13 +1512,13 @@ k8s.io/apiserver/pkg/warning k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics -# k8s.io/cli-runtime v0.34.0 => k8s.io/cli-runtime v0.34.0 +# k8s.io/cli-runtime v0.34.2 => k8s.io/cli-runtime v0.34.0 ## explicit; go 1.24.0 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.34.1 => k8s.io/client-go v0.34.0 +# k8s.io/client-go v0.35.0 => k8s.io/client-go v0.34.0 ## explicit; go 1.24.0 k8s.io/client-go/applyconfigurations k8s.io/client-go/applyconfigurations/admissionregistration/v1 @@ -1856,7 +1882,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.34.1 => k8s.io/component-base v0.34.0 +# k8s.io/component-base v0.35.0 => k8s.io/component-base v0.34.0 ## explicit; go 1.24.0 k8s.io/component-base/cli/flag k8s.io/component-base/compatibility @@ -1884,8 +1910,10 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b -## explicit; go 1.23 +k8s.io/klog/v2/internal/verbosity +k8s.io/klog/v2/textlogger +# k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 +## explicit; go 1.23.0 k8s.io/kube-openapi/pkg/cached k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler3 @@ -1902,7 +1930,7 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/kubectl v0.34.0 => k8s.io/kubectl v0.34.0 +# k8s.io/kubectl v0.34.2 => k8s.io/kubectl v0.34.0 ## explicit; go 1.24.0 k8s.io/kubectl/pkg/cmd/util k8s.io/kubectl/pkg/scheme @@ -1919,7 +1947,7 @@ k8s.io/kubernetes/pkg/apis/rbac/v1 k8s.io/kubernetes/pkg/registry/rbac k8s.io/kubernetes/pkg/registry/rbac/validation k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac -# k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 +# k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1965,8 +1993,8 @@ oras.land/oras-go/v2/registry/remote/credentials/trace oras.land/oras-go/v2/registry/remote/errcode oras.land/oras-go/v2/registry/remote/internal/errutil oras.land/oras-go/v2/registry/remote/retry -# pkg.package-operator.run/boxcutter v0.7.1 -## explicit; go 1.24.0 +# pkg.package-operator.run/boxcutter v0.8.0 +## explicit; go 1.24.6 pkg.package-operator.run/boxcutter 
pkg.package-operator.run/boxcutter/machinery pkg.package-operator.run/boxcutter/machinery/types @@ -2037,8 +2065,8 @@ sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics sigs.k8s.io/controller-runtime/pkg/webhook/conversion sigs.k8s.io/controller-runtime/pkg/webhook/conversion/metrics sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics -# sigs.k8s.io/controller-tools v0.19.0 -## explicit; go 1.24.0 +# sigs.k8s.io/controller-tools v0.20.0 +## explicit; go 1.25.0 sigs.k8s.io/controller-tools/pkg/crd sigs.k8s.io/controller-tools/pkg/crd/markers sigs.k8s.io/controller-tools/pkg/genall @@ -2059,10 +2087,10 @@ sigs.k8s.io/crdify/pkg/validations/property sigs.k8s.io/crdify/pkg/validators/crd sigs.k8s.io/crdify/pkg/validators/version/same sigs.k8s.io/crdify/pkg/validators/version/served -# sigs.k8s.io/gateway-api v1.1.0 -## explicit; go 1.22.0 +# sigs.k8s.io/gateway-api v1.4.0 +## explicit; go 1.24.0 sigs.k8s.io/gateway-api/apis/v1 -# sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 +# sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 ## explicit; go 1.23 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json @@ -2148,7 +2176,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk ## explicit; go 1.18 sigs.k8s.io/randfill sigs.k8s.io/randfill/bytesource -# sigs.k8s.io/structured-merge-diff/v6 v6.3.0 +# sigs.k8s.io/structured-merge-diff/v6 v6.3.1 ## explicit; go 1.23 sigs.k8s.io/structured-merge-diff/v6/fieldpath sigs.k8s.io/structured-merge-diff/v6/merge diff --git a/vendor/pkg.package-operator.run/boxcutter/boxcutter.go b/vendor/pkg.package-operator.run/boxcutter/boxcutter.go index dadf63744..a2133178a 100644 --- a/vendor/pkg.package-operator.run/boxcutter/boxcutter.go +++ b/vendor/pkg.package-operator.run/boxcutter/boxcutter.go @@ -107,14 +107,40 @@ type RevisionEngineOptions struct { Scheme *runtime.Scheme FieldOwner string SystemPrefix string - DiscoveryClient discovery.DiscoveryInterface + DiscoveryClient discovery.OpenAPIV3SchemaInterface RestMapper meta.RESTMapper Writer client.Writer Reader client.Reader // Optional - OwnerStrategy OwnerStrategy + OwnerStrategy OwnerStrategy + PhaseValidator *validation.PhaseValidator +} + +// NewPhaseEngine returns a new PhaseEngine instance. +func NewPhaseEngine(opts RevisionEngineOptions) (*machinery.PhaseEngine, error) { + if err := validateRevisionEngineOpts(opts); err != nil { + return nil, err + } + + if opts.OwnerStrategy == nil { + opts.OwnerStrategy = ownerhandling.NewNative(opts.Scheme) + } + + if opts.PhaseValidator == nil { + opts.PhaseValidator = validation.NewNamespacedPhaseValidator(opts.RestMapper, opts.Writer) + } + + comp := machinery.NewComparator( + opts.OwnerStrategy, opts.DiscoveryClient, opts.Scheme, opts.FieldOwner) + + oe := machinery.NewObjectEngine( + opts.Scheme, opts.Reader, opts.Writer, + opts.OwnerStrategy, comp, opts.FieldOwner, opts.SystemPrefix, + ) + + return machinery.NewPhaseEngine(oe, opts.PhaseValidator), nil } // NewRevisionEngine returns a new RevisionEngine instance. diff --git a/vendor/pkg.package-operator.run/boxcutter/machinery/errors.go b/vendor/pkg.package-operator.run/boxcutter/machinery/errors.go index b763d789a..3a2665ca2 100644 --- a/vendor/pkg.package-operator.run/boxcutter/machinery/errors.go +++ b/vendor/pkg.package-operator.run/boxcutter/machinery/errors.go @@ -1,14 +1,21 @@ package machinery +import ( + "fmt" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + // CreateCollisionError is returned when boxcutter tries to create an object, // but it already exists. 
// This happens when another actor has created the object and caches are slow, // or the colliding object is excluded via cache selectors. type CreateCollisionError struct { - msg string + object client.Object + msg string } // Error implements Go's error interface. func (e CreateCollisionError) Error() string { - return e.msg + return fmt.Sprintf("%s: %s", e.object, e.msg) } diff --git a/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go b/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go index 96b4fb244..5da668ff1 100644 --- a/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go +++ b/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go @@ -9,6 +9,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" machinerytypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" @@ -77,6 +78,10 @@ type comparator interface { ) (res CompareResult, err error) } +const ( + boxcutterManagedLabel string = "boxcutter-managed" +) + // Teardown ensures the given object is safely removed from the cluster. func (e *ObjectEngine) Teardown( ctx context.Context, @@ -104,7 +109,12 @@ func (e *ObjectEngine) Teardown( // Shortcut when Owner is orphaning its dependents. // If we don't check this, we might be too quick and start deleting // dependents that should be kept on the cluster! - if controllerutil.ContainsFinalizer(owner, "orphan") { + if controllerutil.ContainsFinalizer(owner, "orphan") || options.Orphan { + err := removeBoxcutterManagedLabel(ctx, e.writer, desiredObject.(*unstructured.Unstructured)) + if err != nil { + return false, err + } + return true, nil } @@ -173,6 +183,14 @@ func (e *ObjectEngine) Reconcile( opt.ApplyToObjectReconcileOptions(&options) } + labels := desiredObject.GetLabels() + if labels == nil { + labels = map[string]string{} + } + + labels[boxcutterManagedLabel] = "True" + desiredObject.SetLabels(labels) + options.Default() // Sanity checks. @@ -219,7 +237,7 @@ func (e *ObjectEngine) Reconcile( if errors.IsAlreadyExists(err) { // Might be a slow cache or an object created by a different actor // but excluded by the cache selector. - return nil, &CreateCollisionError{msg: err.Error()} + return nil, &CreateCollisionError{object: desiredObject, msg: err.Error()} } if err != nil { @@ -231,7 +249,7 @@ func (e *ObjectEngine) Reconcile( } return newObjectResultCreated( - desiredObject, options.Probes), nil + desiredObject, options), nil case err != nil: return nil, fmt.Errorf("getting object: %w", err) @@ -271,7 +289,7 @@ func (e *ObjectEngine) objectUpdateHandling( // Leave object alone. // It's already owned by a later revision. return newObjectResultProgressed( - actualObject, compareRes, options.Probes, + actualObject, compareRes, options, ), nil } @@ -290,7 +308,7 @@ func (e *ObjectEngine) objectUpdateHandling( // No conflict with another field manager // and no modification needed.
return newObjectResultIdle( - actualObject, compareRes, options.Probes, + actualObject, compareRes, options, ), nil } @@ -306,7 +324,7 @@ func (e *ObjectEngine) objectUpdateHandling( } return newObjectResultUpdated( - desiredObject, compareRes, options.Probes, + desiredObject, compareRes, options, ), nil } @@ -340,12 +358,12 @@ func (e *ObjectEngine) objectUpdateHandling( if options.Paused { return newObjectResultRecovered( - actualObject, compareRes, options.Probes, + actualObject, compareRes, options, ), nil } return newObjectResultRecovered( - desiredObject, compareRes, options.Probes, + desiredObject, compareRes, options, ), nil // Taking control checklist: @@ -359,7 +377,7 @@ func (e *ObjectEngine) objectUpdateHandling( if options.CollisionProtection != types.CollisionProtectionNone { return newObjectResultConflict( actualObject, compareRes, - actualOwner, options.Probes, + actualOwner, options, ), nil } @@ -368,10 +386,10 @@ func (e *ObjectEngine) objectUpdateHandling( // the object might have been just orphaned, if we re-adopt it now, it would get deleted // by the kubernetes garbage collector. if options.CollisionProtection == types.CollisionProtectionPrevent || - e.hasSystemAnnotationsOrLabels(actualObject) { + e.isBoxcutterManaged(actualObject) { return newObjectResultConflict( actualObject, compareRes, - actualOwner, options.Probes, + actualOwner, options, ), nil } @@ -411,24 +429,18 @@ func (e *ObjectEngine) objectUpdateHandling( if options.Paused { return newObjectResultUpdated( - actualObject, compareRes, options.Probes, + actualObject, compareRes, options, ), nil } return newObjectResultUpdated( - desiredObject, compareRes, options.Probes, + desiredObject, compareRes, options, ), nil } -func (e *ObjectEngine) hasSystemAnnotationsOrLabels(obj client.Object) bool { - for k := range obj.GetAnnotations() { - if strings.HasPrefix(k, e.systemPrefix) { - return true - } - } - +func (e *ObjectEngine) isBoxcutterManaged(obj client.Object) bool { for k := range obj.GetLabels() { - if strings.HasPrefix(k, e.systemPrefix) { + if strings.HasPrefix(k, boxcutterManagedLabel) { return true } } @@ -567,3 +579,20 @@ func (e *ObjectEngine) migrateFieldManagersToSSA( func (e *ObjectEngine) revisionAnnotation() string { return e.systemPrefix + "/revision" } + +func removeBoxcutterManagedLabel( + ctx context.Context, w client.Writer, obj *unstructured.Unstructured, +) error { + updated := obj.DeepCopy() + + labels := updated.GetLabels() + + delete(labels, boxcutterManagedLabel) + updated.SetLabels(labels) + + if err := w.Patch(ctx, updated, client.MergeFrom(obj)); err != nil { + return fmt.Errorf("patching object labels: %w", err) + } + + return nil +} diff --git a/vendor/pkg.package-operator.run/boxcutter/machinery/phases.go b/vendor/pkg.package-operator.run/boxcutter/machinery/phases.go index 703868c46..18eb2b1ab 100644 --- a/vendor/pkg.package-operator.run/boxcutter/machinery/phases.go +++ b/vendor/pkg.package-operator.run/boxcutter/machinery/phases.go @@ -221,7 +221,7 @@ type PhaseResult interface { // InTransition returns true if the Phase has not yet fully rolled out, // if the phase has objects progressed to a new revision or // if objects have unresolved conflicts. - InTransistion() bool + InTransition() bool // IsComplete returns true when all objects have // successfully been reconciled and pass their probes. 
IsComplete() bool @@ -256,7 +256,7 @@ func (r *phaseResult) GetObjects() []ObjectResult { // InTransition returns true if the Phase has not yet fully rolled out, // if the phase has some objects progressed to a new revision or // if objects have unresolved conflicts. -func (r *phaseResult) InTransistion() bool { +func (r *phaseResult) InTransition() bool { if err := r.GetValidationError(); err != nil { return false } @@ -297,11 +297,7 @@ func (r *phaseResult) IsComplete() bool { } for _, o := range r.objects { - if o.Action() == ActionCollision { - return false - } - - if probe, ok := o.Probes()[types.ProgressProbeType]; ok && !probe.Success { + if !o.IsComplete() { return false } } @@ -312,7 +308,7 @@ func (r *phaseResult) IsComplete() bool { func (r *phaseResult) String() string { out := fmt.Sprintf( "Phase %q\nComplete: %t\nIn Transition: %t\n", - r.name, r.IsComplete(), r.InTransistion(), + r.name, r.IsComplete(), r.InTransition(), ) if err := r.GetValidationError(); err != nil { diff --git a/vendor/pkg.package-operator.run/boxcutter/machinery/results.go b/vendor/pkg.package-operator.run/boxcutter/machinery/results.go index 4a7c82fc7..97395d123 100644 --- a/vendor/pkg.package-operator.run/boxcutter/machinery/results.go +++ b/vendor/pkg.package-operator.run/boxcutter/machinery/results.go @@ -16,14 +16,21 @@ type ObjectResult interface { Action() Action // Object as last seen on the cluster after creation/update. Object() Object - // Success returns true when the operation is considered successful. - // Operations are considered a success, when the object reflects desired state, - // is owned by the right controller and passes the given probe. - Success() bool // Probes returns the results from the given object Probes. - Probes() map[string]ObjectProbeResult + ProbeResults() types.ProbeResultContainer // String returns a human readable description of the Result. String() string + // IsComplete returns true when: + // - the reconciler has been paused and action is "Idle" or "Progressed" + // - there has been no collision + // - the progression probe succeeded + // it returns false when: + // - the reconciler has been paused and action is not "Idle" or "Progressed" + // - there has been a collision + // - the progression probe failed or returned unknown + IsComplete() bool + // IsPaused returns true when the WithPaused option has been set. + IsPaused() bool } // ObjectProbeResult records probe results for the object. @@ -44,16 +51,18 @@ var ( // ObjectResultCreated is returned when the Object was just created. type ObjectResultCreated struct { obj Object - probeResults map[string]ObjectProbeResult + probeResults types.ProbeResultContainer + options types.ObjectReconcileOptions } func newObjectResultCreated( obj Object, - probes map[string]types.Prober, + options types.ObjectReconcileOptions, ) ObjectResult { return ObjectResultCreated{ obj: obj, - probeResults: runProbes(obj, probes), + probeResults: runProbes(obj, options.Probes), + options: options, } } @@ -67,21 +76,25 @@ func (r ObjectResultCreated) Object() Object { return r.obj } -// Success returns true when the operation is considered successful. -// Operations are considered a success, when the object reflects desired state, -// is owned by the right controller and passes the given probe. -func (r ObjectResultCreated) Success() bool { - for _, res := range r.probeResults { - if !res.Success { - return false - } - } +// IsPaused returns true when the WithPaused option has been set. 
+func (r ObjectResultCreated) IsPaused() bool { + return r.options.Paused +} - return true +// IsComplete returns true when: +// - the reconciler has been paused and action is "Idle" or "Progressed" +// - there has been no collision +// - the progression probe succeeded +// it returns false when: +// - the reconciler has been paused and action is not "Idle" or "Progressed" +// - there has been a collision +// - the progression probe failed or returned unknown. +func (r ObjectResultCreated) IsComplete() bool { + return isComplete(ActionCreated, r.probeResults, r.options) } -// Probes returns the results from the given object Probe. -func (r ObjectResultCreated) Probes() map[string]ObjectProbeResult { +// ProbeResults returns the results from the given object Probe. +func (r ObjectResultCreated) ProbeResults() types.ProbeResultContainer { return r.probeResults } @@ -98,10 +111,10 @@ type ObjectResultUpdated struct { func newObjectResultUpdated( obj Object, diverged CompareResult, - probes map[string]types.Prober, + options types.ObjectReconcileOptions, ) ObjectResult { return ObjectResultUpdated{ - normalResult: newNormalObjectResult(ActionUpdated, obj, diverged, probes), + normalResult: newNormalObjectResult(ActionUpdated, obj, diverged, options), } } @@ -113,10 +126,10 @@ type ObjectResultProgressed struct { func newObjectResultProgressed( obj Object, diverged CompareResult, - probes map[string]types.Prober, + options types.ObjectReconcileOptions, ) ObjectResult { return ObjectResultProgressed{ - normalResult: newNormalObjectResult(ActionProgressed, obj, diverged, probes), + normalResult: newNormalObjectResult(ActionProgressed, obj, diverged, options), } } @@ -128,10 +141,10 @@ type ObjectResultIdle struct { func newObjectResultIdle( obj Object, diverged CompareResult, - probes map[string]types.Prober, + options types.ObjectReconcileOptions, ) ObjectResult { return ObjectResultIdle{ - normalResult: newNormalObjectResult(ActionIdle, obj, diverged, probes), + normalResult: newNormalObjectResult(ActionIdle, obj, diverged, options), } } @@ -143,31 +156,37 @@ type ObjectResultRecovered struct { func newObjectResultRecovered( obj Object, diverged CompareResult, - probes map[string]types.Prober, + options types.ObjectReconcileOptions, ) ObjectResult { return ObjectResultRecovered{ - normalResult: newNormalObjectResult(ActionRecovered, obj, diverged, probes), + normalResult: newNormalObjectResult(ActionRecovered, obj, diverged, options), } } type normalResult struct { action Action obj Object - probeResults map[string]ObjectProbeResult + probeResults types.ProbeResultContainer compareResult CompareResult + options types.ObjectReconcileOptions } func newNormalObjectResult( action Action, obj Object, compResult CompareResult, - probes map[string]types.Prober, + options types.ObjectReconcileOptions, ) normalResult { + if action == ActionCreated { + panic("use newObjectResultCreated, instead") + } + return normalResult{ obj: obj, action: action, - probeResults: runProbes(obj, probes), + probeResults: runProbes(obj, options.Probes), compareResult: compResult, + options: options, } } @@ -188,22 +207,22 @@ func (r normalResult) CompareResult() CompareResult { return r.compareResult } -// Probe returns the results from the given object Probe. -func (r normalResult) Probes() map[string]ObjectProbeResult { +// ProbeResults returns the results from the given object Probe. 
+func (r normalResult) ProbeResults() types.ProbeResultContainer { return r.probeResults } -// Success returns true when the operation is considered successful. -// Operations are considered a success, when the object reflects desired state, -// is owned by the right controller and passes the given probe. -func (r normalResult) Success() bool { - for _, res := range r.probeResults { - if !res.Success { - return false - } - } +// IsPaused returns true when the WithPaused option has been set. +func (r normalResult) IsPaused() bool { + return r.options.Paused +} - return true +// IsComplete returns true when: +// - the operation has not been paused +// - there has been no collision +// - the progression probe succeeded. +func (r normalResult) IsComplete() bool { + return isComplete(r.action, r.probeResults, r.options) } // String returns a human readable description of the Result. @@ -244,12 +263,12 @@ func newObjectResultConflict( obj Object, diverged CompareResult, conflictingOwner *metav1.OwnerReference, - probes map[string]types.Prober, + options types.ObjectReconcileOptions, ) ObjectResult { return ObjectResultCollision{ normalResult: newNormalObjectResult( ActionCollision, - obj, diverged, probes, + obj, diverged, options, ), conflictingOwner: conflictingOwner, } @@ -280,16 +299,21 @@ func reportStart(or ObjectResult) string { panic(err) } + actionStr := "Action" + if or.IsPaused() { + actionStr = "Action (PAUSED)" + } + gvk := obj.GetObjectKind().GroupVersionKind() msg := fmt.Sprintf( "Object %s.%s %s/%s\n"+ - `Action: %q`+"\n", + `%s: %q`+"\n", gvk.Kind, gvk.GroupVersion().String(), obj.GetNamespace(), obj.GetName(), - or.Action(), + actionStr, or.Action(), ) - probes := or.Probes() + probes := or.ProbeResults() probeTypes := make([]string, 0, len(probes)) for k := range probes { @@ -304,29 +328,73 @@ func reportStart(or ObjectResult) string { for _, probeType := range probeTypes { probeRes := probes[probeType] - if probeRes.Success { + switch probeRes.Status { + case types.ProbeStatusTrue: msg += fmt.Sprintf("- %s: Succeeded\n", probeType) - } else { + case types.ProbeStatusFalse: msg += fmt.Sprintf("- %s: Failed\n", probeType) - for _, m := range probeRes.Messages { - msg += " - " + m + "\n" - } + case types.ProbeStatusUnknown: + msg += fmt.Sprintf("- %s: Unknown\n", probeType) + } + + for _, m := range probeRes.Messages { + msg += " - " + m + "\n" } } return msg } -func runProbes(obj Object, probes map[string]types.Prober) map[string]ObjectProbeResult { - results := map[string]ObjectProbeResult{} +func runProbes(obj Object, probes map[string]types.Prober) types.ProbeResultContainer { + results := types.ProbeResultContainer{} for t, probe := range probes { - s, msgs := probe.Probe(obj) - results[t] = ObjectProbeResult{ - Success: s, - Messages: msgs, - } + results[t] = probe.Probe(obj) } return results } + +// isComplete returns true when: +// - the reconciler has been paused and action is "Idle" or "Progressed" +// - there has been no collision +// - the progression probe succeeded +// it returns false when: +// - the reconciler has been paused and action is not "Idle" or "Progressed" +// - there has been a collision +// - the progression probe failed or returned unknown. +func isComplete( + action Action, + probeResults types.ProbeResultContainer, + options types.ObjectReconcileOptions, +) bool { + if action == ActionCollision { + // Collisions always report as incomplete. 
+ return false + } + + if options.Paused { + switch action { + case ActionIdle, ActionProgressed: + // Even though we are paused, the object is ok -> "Idle" or + // no longer "our problem" -> "Progressed" + + default: + // If Paused: + // Action == "Created": the object has NOT been created + // Action == "Updated": the object has NOT been updated + // Action == "Recovered": the object has NOT been recovered + return false + } + } + + if options.Probes[types.ProgressProbeType] == nil { + // no probe defined, skip + return true + } + + // Only check the progress probe and only count explicit true for completeness: + r := probeResults.Type(types.ProgressProbeType) + + return r.Status == types.ProbeStatusTrue +} diff --git a/vendor/pkg.package-operator.run/boxcutter/machinery/revision.go b/vendor/pkg.package-operator.run/boxcutter/machinery/revision.go index c6eba0ca6..84dfc854f 100644 --- a/vendor/pkg.package-operator.run/boxcutter/machinery/revision.go +++ b/vendor/pkg.package-operator.run/boxcutter/machinery/revision.go @@ -64,7 +64,7 @@ type RevisionResult interface { // InTransition returns true if the Phase has not yet fully rolled out, // if the phase has objects progressed to a new revision or // if objects have unresolved conflicts. - InTransistion() bool + InTransition() bool // IsComplete returns true when all objects have // successfully been reconciled and pass their probes. IsComplete() bool @@ -88,9 +88,9 @@ func (r *revisionResult) GetValidationError() *validation.RevisionValidationErro // InTransition returns true if the Phase has not yet fully rolled out, // if the phase has objects progressed to a new revision or // if objects have unresolved conflicts. -func (r *revisionResult) InTransistion() bool { +func (r *revisionResult) InTransition() bool { for _, p := range r.phasesResults { - if p.InTransistion() { + if p.InTransition() { return true } } @@ -145,7 +145,7 @@ func (r *revisionResult) GetPhases() []PhaseResult { func (r *revisionResult) String() string { out := fmt.Sprintf( "Revision\nComplete: %t\nIn Transition: %t\n", - r.IsComplete(), r.InTransistion(), + r.IsComplete(), r.InTransition(), ) if err := r.GetValidationError(); err != nil { diff --git a/vendor/pkg.package-operator.run/boxcutter/machinery/types/options.go b/vendor/pkg.package-operator.run/boxcutter/machinery/types/options.go index f4e963c41..3ea290366 100644 --- a/vendor/pkg.package-operator.run/boxcutter/machinery/types/options.go +++ b/vendor/pkg.package-operator.run/boxcutter/machinery/types/options.go @@ -127,7 +127,9 @@ var ( ) // ObjectTeardownOptions holds configuration options changing object teardown. -type ObjectTeardownOptions struct{} +type ObjectTeardownOptions struct { + Orphan bool +} // Default sets empty Option fields to their default value. func (opts *ObjectTeardownOptions) Default() {} @@ -214,27 +216,6 @@ func (p WithPaused) ApplyToRevisionReconcileOptions(opts *RevisionReconcileOptio opts.DefaultPhaseOptions = append(opts.DefaultPhaseOptions, p) } -// ProgressProbeType is a well-known probe type used to guard phase progression. -const ProgressProbeType = "Progress" - -// Prober needs to be implemented by any probing implementation. -type Prober interface { - Probe(obj client.Object) (success bool, messages []string) -} - -// ProbeFunc wraps the given function to work with the Prober interface.
-func ProbeFunc(fn func(obj client.Object) (success bool, messages []string)) Prober { - return &probeFn{Fn: fn} -} - -type probeFn struct { - Fn func(obj client.Object) (success bool, messages []string) -} - -func (p *probeFn) Probe(obj client.Object) (success bool, messages []string) { - return p.Fn(obj) -} - // WithProbe registers the given probe to evaluate state of objects. func WithProbe(t string, probe Prober) ObjectReconcileOption { return &optionFn{ @@ -247,6 +228,17 @@ func WithProbe(t string, probe Prober) ObjectReconcileOption { } } +// WithOrphan excludes objects from Teardown. +// Use it as WithObjectTeardownOptions(obj, WithOrphan()) to exclude individual objects or +// use it as WithPhaseTeardownOptions("my-phase", WithOrphan()) to exclude a whole phase. +func WithOrphan() ObjectTeardownOption { + return &teardownOptionFn{ + fn: func(opts *ObjectTeardownOptions) { + opts.Orphan = true + }, + } +} + type withObjectReconcileOptions struct { obj ObjectRef opts []ObjectReconcileOption @@ -363,3 +355,22 @@ func (p *optionFn) ApplyToPhaseReconcileOptions(opts *PhaseReconcileOptions) { func (p *optionFn) ApplyToRevisionReconcileOptions(opts *RevisionReconcileOptions) { opts.DefaultPhaseOptions = append(opts.DefaultPhaseOptions, p) } + +type teardownOptionFn struct { + fn func(opts *ObjectTeardownOptions) +} + +// ApplyToObjectTeardownOptions implements ObjectTeardownOption. +func (p *teardownOptionFn) ApplyToObjectTeardownOptions(opts *ObjectTeardownOptions) { + p.fn(opts) +} + +// ApplyToPhaseTeardownOptions implements PhaseOption. +func (p *teardownOptionFn) ApplyToPhaseTeardownOptions(opts *PhaseTeardownOptions) { + opts.DefaultObjectOptions = append(opts.DefaultObjectOptions, p) +} + +// ApplyToRevisionTeardownOptions implements RevisionTeardownOption. +func (p *teardownOptionFn) ApplyToRevisionTeardownOptions(opts *RevisionTeardownOptions) { + opts.DefaultPhaseOptions = append(opts.DefaultPhaseOptions, p) +} diff --git a/vendor/pkg.package-operator.run/boxcutter/machinery/types/probing.go b/vendor/pkg.package-operator.run/boxcutter/machinery/types/probing.go new file mode 100644 index 000000000..8841cba7d --- /dev/null +++ b/vendor/pkg.package-operator.run/boxcutter/machinery/types/probing.go @@ -0,0 +1,65 @@ +package types + +import ( + "fmt" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ProgressProbeType is a well-known probe type used to guard phase progression. +const ProgressProbeType = "Progress" + +// Prober needs to be implemented by any probing implementation. +type Prober interface { + Probe(obj client.Object) ProbeResult +} + +// ProbeStatus may be "True", "False" or "Unknown". +type ProbeStatus string + +const ( + // ProbeStatusTrue means the probe has succeeded. + ProbeStatusTrue ProbeStatus = "True" + // ProbeStatusFalse means the probe has failed. + ProbeStatusFalse ProbeStatus = "False" + // ProbeStatusUnknown means the probe was unable to determine the state. + ProbeStatusUnknown ProbeStatus = "Unknown" +) + +// ProbeResult combines a ProbeStatus with human readable messages describing how the state happened. +type ProbeResult struct { + // Status of the probe result, one of True, False, Unknown. + Status ProbeStatus + // Messages are human readable status descriptions containing details about the reported state. + Messages []string +} + +// ProbeResultContainer holds results from multiple probes. +type ProbeResultContainer map[string]ProbeResult + +// Type returns the probe result for the given probe type.
+func (c ProbeResultContainer) Type(t string) ProbeResult { + if r, ok := c[t]; ok { + return r + } + + return ProbeResult{ + Status: ProbeStatusUnknown, + Messages: []string{ + fmt.Sprintf("no such probe %q", t), + }, + } +} + +// ProbeFunc wraps the given function to work with the Prober interface. +func ProbeFunc(fn func(obj client.Object) ProbeResult) Prober { + return &probeFn{Fn: fn} +} + +type probeFn struct { + Fn func(obj client.Object) ProbeResult +} + +func (p *probeFn) Probe(obj client.Object) ProbeResult { + return p.Fn(obj) +} diff --git a/vendor/pkg.package-operator.run/boxcutter/probing/cel.go b/vendor/pkg.package-operator.run/boxcutter/probing/cel.go index 32bb23054..751549150 100644 --- a/vendor/pkg.package-operator.run/boxcutter/probing/cel.go +++ b/vendor/pkg.package-operator.run/boxcutter/probing/cel.go @@ -63,17 +63,30 @@ func NewCELProbe(rule, message string) ( } // Probe executes the probe. -func (p *CELProbe) Probe(obj client.Object) (success bool, messages []string) { +func (p *CELProbe) Probe(obj client.Object) Result { return probeUnstructuredSingleMsg(obj, p.probe) } -func (p *CELProbe) probe(obj *unstructured.Unstructured) (success bool, message string) { +func (p *CELProbe) probe(obj *unstructured.Unstructured) Result { val, _, err := p.Program.Eval(map[string]any{ "self": obj.Object, }) if err != nil { - return false, fmt.Sprintf("CEL program failed: %v", err) + return Result{ + Status: StatusUnknown, + Messages: []string{fmt.Sprintf("CEL program failed: %v", err)}, + } } - return val.Value().(bool), p.Message + success := val.Value().(bool) + if success { + return Result{ + Status: StatusTrue, + } + } + + return Result{ + Status: StatusFalse, + Messages: []string{p.Message}, + } } diff --git a/vendor/pkg.package-operator.run/boxcutter/probing/condition.go b/vendor/pkg.package-operator.run/boxcutter/probing/condition.go index 6196297ee..8cb297fdb 100644 --- a/vendor/pkg.package-operator.run/boxcutter/probing/condition.go +++ b/vendor/pkg.package-operator.run/boxcutter/probing/condition.go @@ -15,36 +15,37 @@ type ConditionProbe struct { var _ Prober = (*ConditionProbe)(nil) // Probe executes the probe. -func (cp *ConditionProbe) Probe(obj client.Object) (success bool, messages []string) { +func (cp *ConditionProbe) Probe(obj client.Object) Result { return probeUnstructuredSingleMsg(obj, cp.probe) } -func (cp *ConditionProbe) probe(obj *unstructured.Unstructured) (success bool, message string) { - defer func() { - if success { - return - } - // add probed condition type and status as context to error message. 
- message = fmt.Sprintf("condition %q == %q: %s", cp.Type, cp.Status, message) - }() - +func (cp *ConditionProbe) probe(obj *unstructured.Unstructured) Result { rawConditions, exist, err := unstructured.NestedFieldNoCopy( obj.Object, "status", "conditions") conditions, ok := rawConditions.([]any) if err != nil || !exist { - return false, "missing .status.conditions" + return Result{ + Status: StatusUnknown, + Messages: []string{"missing .status.conditions"}, + } } if !ok { - return false, "malformed" + return Result{ + Status: StatusUnknown, + Messages: []string{"malformed .status.conditions"}, + } } for _, condI := range conditions { cond, ok := condI.(map[string]any) if !ok { // no idea what this is supposed to be - return false, "malformed" + return Result{ + Status: StatusUnknown, + Messages: []string{"malformed .status.conditions"}, + } } if cond["type"] != cp.Type { @@ -56,15 +57,27 @@ func (cp *ConditionProbe) probe(obj *unstructured.Unstructured) (success bool, m if observedGeneration, ok, err := unstructured.NestedInt64( cond, "observedGeneration", ); err == nil && ok && observedGeneration != obj.GetGeneration() { - return false, "outdated" + return Result{ + Status: StatusUnknown, + Messages: []string{fmt.Sprintf(`.status.condition["%s"] outdated`, cp.Type)}, + } } if cond["status"] == cp.Status { - return true, "" + return Result{ + Status: StatusTrue, + Messages: []string{fmt.Sprintf(`.status.condition["%s"] is %s`, cp.Type, cp.Status)}, + } } - return false, "wrong status" + return Result{ + Status: StatusFalse, + Messages: []string{fmt.Sprintf(`.status.condition["%s"] is %s`, cp.Type, cond["status"])}, + } } - return false, "not reported" + return Result{ + Status: StatusUnknown, + Messages: []string{fmt.Sprintf(`missing .status.condition["%s"]`, cp.Type)}, + } } diff --git a/vendor/pkg.package-operator.run/boxcutter/probing/fieldsequal.go b/vendor/pkg.package-operator.run/boxcutter/probing/fieldsequal.go index 893af9979..ce504b2c6 100644 --- a/vendor/pkg.package-operator.run/boxcutter/probing/fieldsequal.go +++ b/vendor/pkg.package-operator.run/boxcutter/probing/fieldsequal.go @@ -1,6 +1,7 @@ package probing import ( + "encoding/json" "fmt" "strings" @@ -17,35 +18,44 @@ type FieldsEqualProbe struct { var _ Prober = (*FieldsEqualProbe)(nil) // Probe executes the probe. -func (fe *FieldsEqualProbe) Probe(obj client.Object) (success bool, messages []string) { +func (fe *FieldsEqualProbe) Probe(obj client.Object) Result { return probeUnstructuredSingleMsg(obj, fe.probe) } -func (fe *FieldsEqualProbe) probe(obj *unstructured.Unstructured) (success bool, message string) { +func (fe *FieldsEqualProbe) probe(obj *unstructured.Unstructured) Result { fieldAPath := strings.Split(strings.Trim(fe.FieldA, "."), ".") fieldBPath := strings.Split(strings.Trim(fe.FieldB, "."), ".") - defer func() { - if success { - return - } - // add probed field paths as context to error message. - message = fmt.Sprintf(`"%v" == "%v": %s`, fe.FieldA, fe.FieldB, message) - }() - fieldAVal, ok, err := unstructured.NestedFieldCopy(obj.Object, fieldAPath...) if err != nil || !ok { - return false, fmt.Sprintf(`"%v" missing`, fe.FieldA) + return Result{ + Status: StatusFalse, + Messages: []string{fmt.Sprintf(`"%v" missing`, fe.FieldA)}, + } } fieldBVal, ok, err := unstructured.NestedFieldCopy(obj.Object, fieldBPath...) 
if err != nil || !ok { - return false, fmt.Sprintf(`"%v" missing`, fe.FieldB) + return Result{ + Status: StatusFalse, + Messages: []string{fmt.Sprintf(`"%v" missing`, fe.FieldB)}, + } } if !equality.Semantic.DeepEqual(fieldAVal, fieldBVal) { - return false, fmt.Sprintf(`"%v" != "%v"`, fieldAVal, fieldBVal) + //nolint:errchkjson + fieldAJSON, _ := json.Marshal(fieldAVal) + //nolint:errchkjson + fieldBJSON, _ := json.Marshal(fieldBVal) + + return Result{ + Status: StatusFalse, + Messages: []string{fmt.Sprintf(`"%s" != "%s" expected: %s got: %s`, fe.FieldA, fe.FieldB, fieldAJSON, fieldBJSON)}, + } } - return true, "" + return Result{ + Status: StatusTrue, + Messages: []string{fmt.Sprintf(`"%s" == "%s"`, fe.FieldA, fe.FieldB)}, + } } diff --git a/vendor/pkg.package-operator.run/boxcutter/probing/observedgeneration.go b/vendor/pkg.package-operator.run/boxcutter/probing/observedgeneration.go index acef47bf0..3134175fc 100644 --- a/vendor/pkg.package-operator.run/boxcutter/probing/observedgeneration.go +++ b/vendor/pkg.package-operator.run/boxcutter/probing/observedgeneration.go @@ -15,12 +15,12 @@ type ObservedGenerationProbe struct { var _ Prober = (*ObservedGenerationProbe)(nil) // Probe executes the probe. -func (cg *ObservedGenerationProbe) Probe(obj client.Object) (success bool, messages []string) { +func (cg *ObservedGenerationProbe) Probe(obj client.Object) Result { unstr := toUnstructured(obj) if observedGeneration, ok, err := unstructured.NestedInt64( unstr.Object, "status", "observedGeneration", ); err == nil && ok && observedGeneration != obj.GetGeneration() { - return false, []string{".status outdated"} + return Result{Status: StatusUnknown, Messages: []string{".status outdated"}} } return cg.Prober.Probe(obj) diff --git a/vendor/pkg.package-operator.run/boxcutter/probing/probe.go b/vendor/pkg.package-operator.run/boxcutter/probing/probe.go index 7768ef09d..4c7df6d72 100644 --- a/vendor/pkg.package-operator.run/boxcutter/probing/probe.go +++ b/vendor/pkg.package-operator.run/boxcutter/probing/probe.go @@ -10,30 +10,89 @@ import ( "pkg.package-operator.run/boxcutter/machinery/types" ) -// Prober check Kubernetes objects for certain conditions and report success or failure with failure messages. +// Prober needs to be implemented by any probing implementation. type Prober = types.Prober -// And combines multiple Prober, only passing if all given Probers succeed. -// Messages of failing Probers will be joined together. +// Status may be "True", "False" or "Unknown". +type Status = types.ProbeStatus + +const ( + // StatusTrue means the probe has succeeded. + StatusTrue Status = types.ProbeStatusTrue + // StatusFalse means the probe has failed. + StatusFalse Status = types.ProbeStatusFalse + // StatusUnknown means the probe was unable to determine the state. + StatusUnknown Status = types.ProbeStatusUnknown +) + +// Result combines a probe status with human readable messages describing how the state happened. +type Result = types.ProbeResult + +// And combines multiple Prober. +// The returned status is: +// - True if all ProbeResults are True +// - False if at least one ProbeResult is False and none are Unknown +// - Unknown if at least one ProbeResult is Unknown +// Messages of the same Status will be combined. type And []Prober var _ Prober = (And)(nil) -// Probe executes the probe. -func (p And) Probe(obj client.Object) (success bool, messages []string) { - var allMsgs []string +// Probe runs probes against the given object and returns the result. 
+func (p And) Probe(obj client.Object) Result { + var unknownMsgs, trueMsgs, falseMsgs []string + + var statusUnknown, statusFalse bool for _, probe := range p { - if success, msgs := probe.Probe(obj); !success { - allMsgs = append(allMsgs, msgs...) + r := probe.Probe(obj) + switch r.Status { + case StatusTrue: + trueMsgs = append(trueMsgs, r.Messages...) + case StatusFalse: + statusFalse = true + + falseMsgs = append(falseMsgs, r.Messages...) + case StatusUnknown: + statusUnknown = true + + unknownMsgs = append(unknownMsgs, r.Messages...) + } + } + + if statusUnknown { + return Result{ + Status: StatusUnknown, + Messages: unknownMsgs, } } - if len(allMsgs) > 0 { - return false, allMsgs + if statusFalse { + return Result{ + Status: StatusFalse, + Messages: falseMsgs, + } + } + + return Result{ + Status: StatusTrue, + Messages: trueMsgs, } +} + +// TrueResult is a helper returning a True ProbeResult with the given messages. +func TrueResult(msgs ...string) Result { + return Result{Status: StatusTrue, Messages: msgs} +} + +// FalseResult is a helper returning a False ProbeResult with the given messages. +func FalseResult(msgs ...string) Result { + return Result{Status: StatusFalse, Messages: msgs} } - return true, nil +// UnknownResult is a helper returning an Unknown ProbeResult with the given messages. +func UnknownResult(msgs ...string) Result { + return Result{Status: StatusUnknown, Messages: msgs} } func toUnstructured(obj client.Object) *unstructured.Unstructured { @@ -47,14 +106,9 @@ func toUnstructured(obj client.Object) *unstructured.Unstructured { func probeUnstructuredSingleMsg( obj client.Object, - probe func(obj *unstructured.Unstructured) (success bool, message string), -) (success bool, messages []string) { + probe func(obj *unstructured.Unstructured) Result, +) Result { unst := toUnstructured(obj) - success, msg := probe(unst) - if success { - return success, nil - } - - return success, []string{msg} + return probe(unst) } diff --git a/vendor/pkg.package-operator.run/boxcutter/probing/selectors.go b/vendor/pkg.package-operator.run/boxcutter/probing/selectors.go index 2a6a7c924..9a1d04152 100644 --- a/vendor/pkg.package-operator.run/boxcutter/probing/selectors.go +++ b/vendor/pkg.package-operator.run/boxcutter/probing/selectors.go @@ -17,7 +17,7 @@ type GroupKindSelector struct { var _ Prober = (*GroupKindSelector)(nil) // Probe executes the probe. -func (kp *GroupKindSelector) Probe(obj client.Object) (success bool, messages []string) { +func (kp *GroupKindSelector) Probe(obj client.Object) Result { gk := obj.GetObjectKind().GroupVersionKind().GroupKind() if kp.GroupKind == gk { return kp.Prober.Probe(obj) @@ -25,7 +25,9 @@ func (kp *GroupKindSelector) Probe(obj client.Object) (success bool, messages [] // We want to _skip_ objects, that don't match. // So this probe succeeds by default.
- return true, nil + return Result{ + Status: StatusTrue, + } } return ss.Prober.Probe(obj) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/conv.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/conv.go index 374e02c37..ba2112558 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/conv.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/conv.go @@ -4,7 +4,7 @@ import ( "fmt" apiextinternal "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -18,13 +18,13 @@ func init() { if err := apiextinternal.AddToScheme(conversionScheme); err != nil { panic("must be able to add internal apiextensions to the CRD conversion Scheme") } - if err := apiext.AddToScheme(conversionScheme); err != nil { + if err := apiextensionsv1.AddToScheme(conversionScheme); err != nil { panic("must be able to add apiextensions/v1 to the CRD conversion Scheme") } } // AsVersion converts a CRD from the canonical internal form (currently v1) to some external form. -func AsVersion(original apiext.CustomResourceDefinition, gv schema.GroupVersion) (runtime.Object, error) { +func AsVersion(original apiextensionsv1.CustomResourceDefinition, gv schema.GroupVersion) (runtime.Object, error) { // TODO: Do we need to keep maintaining this conversion function // post 1.22 when only CRDv1 is served by the apiserver? if gv == apiextv1beta1.SchemeGroupVersion { diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go index 1a5698bec..e788b4d42 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go @@ -20,13 +20,13 @@ import ( "strings" "unicode" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) // TruncateDescription truncates the description of fields in given schema if it // exceeds maxLen. // It tries to chop off the description at the closest sentence boundary. -func TruncateDescription(schema *apiext.JSONSchemaProps, maxLen int) { +func TruncateDescription(schema *apiextensionsv1.JSONSchemaProps, maxLen int) { EditSchema(schema, descVisitor{maxLen: maxLen}) } @@ -37,7 +37,7 @@ type descVisitor struct { maxLen int } -func (v descVisitor) Visit(schema *apiext.JSONSchemaProps) SchemaVisitor { +func (v descVisitor) Visit(schema *apiextensionsv1.JSONSchemaProps) SchemaVisitor { if schema == nil { return v } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go index cf02c9b71..10a1e32fb 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go @@ -19,11 +19,11 @@ package crd import ( "fmt" "reflect" - "sort" + "slices" "strings" "sync" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "sigs.k8s.io/controller-tools/pkg/loader" ) @@ -38,7 +38,7 @@ type ErrorRecorder interface { // isOrNil checks if val is nil if val is of a nillable type, otherwise, // it compares val to valInt (which should probably be the zero value). 
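Both selector probes above succeed by default for non-matching objects, so they act as filters rather than checks. A composition sketch; the literal field names (`GroupKind`, `Selector`, `Prober`) are inferred from the method bodies in the hunks and should be read as assumptions:

```go
package example

import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"pkg.package-operator.run/boxcutter/probing"
)

// onlyFrontendDeployments runs inner just for apps/Deployment objects
// labelled app=frontend; everything else passes trivially (StatusTrue).
func onlyFrontendDeployments(inner probing.Prober) probing.Prober {
	return &probing.GroupKindSelector{
		GroupKind: schema.GroupKind{Group: "apps", Kind: "Deployment"},
		Prober: &probing.LabelSelector{
			Selector: labels.SelectorFromSet(labels.Set{"app": "frontend"}),
			Prober:   inner,
		},
	}
}
```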
-func isOrNil(val reflect.Value, valInt interface{}, zeroInt interface{}) bool { +func isOrNil(val reflect.Value, valInt any, zeroInt any) bool { switch valKind := val.Kind(); valKind { case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: return val.IsNil() @@ -49,7 +49,7 @@ func isOrNil(val reflect.Value, valInt interface{}, zeroInt interface{}) bool { // flattenAllOfInto copies properties from src to dst, then copies the properties // of each item in src's allOf to dst's properties as well. -func flattenAllOfInto(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps, errRec ErrorRecorder) { +func flattenAllOfInto(dst *apiextensionsv1.JSONSchemaProps, src apiextensionsv1.JSONSchemaProps, errRec ErrorRecorder) { if len(src.AllOf) > 0 { for _, embedded := range src.AllOf { flattenAllOfInto(dst, embedded, errRec) @@ -60,9 +60,9 @@ func flattenAllOfInto(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps, e srcVal := reflect.ValueOf(src) typ := dstVal.Type() - srcRemainder := apiext.JSONSchemaProps{} + srcRemainder := apiextensionsv1.JSONSchemaProps{} srcRemVal := reflect.Indirect(reflect.ValueOf(&srcRemainder)) - dstRemainder := apiext.JSONSchemaProps{} + dstRemainder := apiextensionsv1.JSONSchemaProps{} dstRemVal := reflect.Indirect(reflect.ValueOf(&dstRemainder)) hoisted := false @@ -104,8 +104,8 @@ func flattenAllOfInto(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps, e switch fieldName { case "Properties": // merge if possible, use all of otherwise - srcMap := srcInt.(map[string]apiext.JSONSchemaProps) - dstMap := dstInt.(map[string]apiext.JSONSchemaProps) + srcMap := srcInt.(map[string]apiextensionsv1.JSONSchemaProps) + dstMap := dstInt.(map[string]apiextensionsv1.JSONSchemaProps) for k, v := range srcMap { dstProp, exists := dstMap[k] @@ -132,14 +132,14 @@ func flattenAllOfInto(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps, e // - Definitions: common named validation sets that can be references (merge, bail if duplicate) case "AdditionalProperties": // as of the time of writing, `allows: false` is not allowed, so we don't have to handle it - srcProps := srcInt.(*apiext.JSONSchemaPropsOrBool) + srcProps := srcInt.(*apiextensionsv1.JSONSchemaPropsOrBool) if srcProps.Schema == nil { // nothing to merge continue } - dstProps := dstInt.(*apiext.JSONSchemaPropsOrBool) + dstProps := dstInt.(*apiextensionsv1.JSONSchemaPropsOrBool) if dstProps.Schema == nil { - dstProps.Schema = &apiext.JSONSchemaProps{} + dstProps.Schema = &apiextensionsv1.JSONSchemaProps{} } flattenAllOfInto(dstProps.Schema, *srcProps.Schema, errRec) case "XPreserveUnknownFields": @@ -176,7 +176,7 @@ func flattenAllOfInto(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps, e dst.Required = append(dst.Required, req) } // be deterministic - sort.Strings(dst.Required) + slices.Sort(dst.Required) } } @@ -188,7 +188,7 @@ type allOfVisitor struct { errRec ErrorRecorder } -func (v *allOfVisitor) Visit(schema *apiext.JSONSchemaProps) SchemaVisitor { +func (v *allOfVisitor) Visit(schema *apiextensionsv1.JSONSchemaProps) SchemaVisitor { if schema == nil { return v } @@ -210,7 +210,7 @@ func (v *allOfVisitor) Visit(schema *apiext.JSONSchemaProps) SchemaVisitor { // FlattenEmbedded flattens embedded fields (represented via AllOf) which have // already had their references resolved into simple properties in the containing // schema. 
-func FlattenEmbedded(schema *apiext.JSONSchemaProps, errRec ErrorRecorder) *apiext.JSONSchemaProps { +func FlattenEmbedded(schema *apiextensionsv1.JSONSchemaProps, errRec ErrorRecorder) *apiextensionsv1.JSONSchemaProps { outSchema := schema.DeepCopy() EditSchema(outSchema, &allOfVisitor{errRec: errRec}) return outSchema @@ -226,13 +226,13 @@ type Flattener struct { LookupReference func(ref string, contextPkg *loader.Package) (TypeIdent, error) // flattenedTypes hold the flattened version of each seen type for later reuse. - flattenedTypes map[TypeIdent]apiext.JSONSchemaProps + flattenedTypes map[TypeIdent]apiextensionsv1.JSONSchemaProps initOnce sync.Once } func (f *Flattener) init() { f.initOnce.Do(func() { - f.flattenedTypes = make(map[TypeIdent]apiext.JSONSchemaProps) + f.flattenedTypes = make(map[TypeIdent]apiextensionsv1.JSONSchemaProps) if f.LookupReference == nil { f.LookupReference = identFromRef } @@ -240,13 +240,13 @@ func (f *Flattener) init() { } // cacheType saves the flattened version of the given type for later reuse -func (f *Flattener) cacheType(typ TypeIdent, schema apiext.JSONSchemaProps) { +func (f *Flattener) cacheType(typ TypeIdent, schema apiextensionsv1.JSONSchemaProps) { f.init() f.flattenedTypes[typ] = schema } // loadUnflattenedSchema fetches a fresh, unflattened schema from the parser. -func (f *Flattener) loadUnflattenedSchema(typ TypeIdent) (*apiext.JSONSchemaProps, error) { +func (f *Flattener) loadUnflattenedSchema(typ TypeIdent) (*apiextensionsv1.JSONSchemaProps, error) { f.Parser.NeedSchemaFor(typ) baseSchema, found := f.Parser.Schemata[typ] @@ -258,7 +258,7 @@ func (f *Flattener) loadUnflattenedSchema(typ TypeIdent) (*apiext.JSONSchemaProp // FlattenType flattens the given pre-loaded type, removing any references from it. // It deep-copies the schema first, so it won't affect the parser's version of the schema. -func (f *Flattener) FlattenType(typ TypeIdent) *apiext.JSONSchemaProps { +func (f *Flattener) FlattenType(typ TypeIdent) *apiextensionsv1.JSONSchemaProps { f.init() if cachedSchema, isCached := f.flattenedTypes[typ]; isCached { return &cachedSchema @@ -275,7 +275,7 @@ func (f *Flattener) FlattenType(typ TypeIdent) *apiext.JSONSchemaProps { // FlattenSchema flattens the given schema, removing any references. // It deep-copies the schema first, so the input schema won't be affected. -func (f *Flattener) FlattenSchema(baseSchema apiext.JSONSchemaProps, currentPackage *loader.Package) *apiext.JSONSchemaProps { +func (f *Flattener) FlattenSchema(baseSchema apiextensionsv1.JSONSchemaProps, currentPackage *loader.Package) *apiextensionsv1.JSONSchemaProps { resSchema := baseSchema.DeepCopy() EditSchema(resSchema, &flattenVisitor{ Flattener: f, @@ -332,7 +332,7 @@ func identFromRef(ref string, contextPkg *loader.Package) (TypeIdent, error) { // preserveFields copies documentation fields from src into dst, preserving // field-level documentation when flattening, and preserving field-level validation // as allOf entries. 
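A sketch of what `FlattenEmbedded` does to an `allOf` schema. The `ErrorRecorder` interface is only partially visible in this hunk; its single `AddError` method is an assumption based on how it is used elsewhere in controller-tools.

```go
package example

import (
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	"sigs.k8s.io/controller-tools/pkg/crd"
)

// errList is a throwaway ErrorRecorder for the example.
type errList []error

func (e *errList) AddError(err error) { *e = append(*e, err) }

func flattenExample() *apiextensionsv1.JSONSchemaProps {
	src := &apiextensionsv1.JSONSchemaProps{
		Type: "object",
		AllOf: []apiextensionsv1.JSONSchemaProps{
			{Properties: map[string]apiextensionsv1.JSONSchemaProps{
				"name": {Type: "string"},
			}},
			{Required: []string{"name"}},
		},
	}

	var rec errList
	// Properties and Required are hoisted to the top level and the
	// AllOf entries merged away, with Required kept deterministic
	// (now via slices.Sort instead of sort.Strings).
	return crd.FlattenEmbedded(src, &rec)
}
```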
-func preserveFields(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps) { +func preserveFields(dst *apiextensionsv1.JSONSchemaProps, src apiextensionsv1.JSONSchemaProps) { srcDesc := src.Description srcTitle := src.Title srcExDoc := src.ExternalDocs @@ -341,8 +341,8 @@ func preserveFields(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps) { src.Description, src.Title, src.ExternalDocs, src.Example = "", "", nil, nil src.Ref = nil - *dst = apiext.JSONSchemaProps{ - AllOf: []apiext.JSONSchemaProps{*dst, src}, + *dst = apiextensionsv1.JSONSchemaProps{ + AllOf: []apiextensionsv1.JSONSchemaProps{*dst, src}, // keep these, in case the source field doesn't specify anything useful Description: dst.Description, @@ -371,11 +371,11 @@ type flattenVisitor struct { currentPackage *loader.Package currentType *TypeIdent - currentSchema *apiext.JSONSchemaProps - originalField apiext.JSONSchemaProps + currentSchema *apiextensionsv1.JSONSchemaProps + originalField apiextensionsv1.JSONSchemaProps } -func (f *flattenVisitor) Visit(baseSchema *apiext.JSONSchemaProps) SchemaVisitor { +func (f *flattenVisitor) Visit(baseSchema *apiextensionsv1.JSONSchemaProps) SchemaVisitor { if baseSchema == nil { // end-of-node marker, cache the results if f.currentType != nil { @@ -421,7 +421,7 @@ func (f *flattenVisitor) Visit(baseSchema *apiext.JSONSchemaProps) SchemaVisitor // avoid loops (which shouldn't exist, but just in case) // by marking a nil cached pointer before we start recursing - f.cacheType(refIdent, apiext.JSONSchemaProps{}) + f.cacheType(refIdent, apiextensionsv1.JSONSchemaProps{}) return &flattenVisitor{ Flattener: f.Flattener, diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go index 5fad65a71..dfaff0bff 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go @@ -20,10 +20,10 @@ import ( "fmt" "go/ast" "go/types" - "sort" + "slices" "strings" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime/schema" crdmarkers "sigs.k8s.io/controller-tools/pkg/crd/markers" "sigs.k8s.io/controller-tools/pkg/genall" @@ -108,15 +108,15 @@ func (Generator) RegisterMarkers(into *markers.Registry) error { } // transformRemoveCRDStatus ensures we do not write the CRD status field. -func transformRemoveCRDStatus(obj map[string]interface{}) error { +func transformRemoveCRDStatus(obj map[string]any) error { delete(obj, "status") return nil } // transformPreserveUnknownFields adds spec.preserveUnknownFields=value. 
-func transformPreserveUnknownFields(value bool) func(map[string]interface{}) error { - return func(obj map[string]interface{}) error { - if spec, ok := obj["spec"].(map[interface{}]interface{}); ok { +func transformPreserveUnknownFields(value bool) func(map[string]any) error { + return func(obj map[string]any) error { + if spec, ok := obj["spec"].(map[any]any); ok { spec["preserveUnknownFields"] = value } return nil @@ -185,9 +185,9 @@ func (g Generator) Generate(ctx *genall.GenerationContext) error { // Prevent the top level metadata for the CRD to be generate regardless of the intention in the arguments FixTopLevelMetadata(crdRaw) - versionedCRDs := make([]interface{}, len(crdVersions)) + versionedCRDs := make([]any, len(crdVersions)) for i, ver := range crdVersions { - conv, err := AsVersion(crdRaw, schema.GroupVersion{Group: apiext.SchemeGroupVersion.Group, Version: ver}) + conv, err := AsVersion(crdRaw, schema.GroupVersion{Group: apiextensionsv1.SchemeGroupVersion.Group, Version: ver}) if err != nil { return err } @@ -195,14 +195,14 @@ func (g Generator) Generate(ctx *genall.GenerationContext) error { } for i, crd := range versionedCRDs { - removeDescriptionFromMetadata(crd.(*apiext.CustomResourceDefinition)) + removeDescriptionFromMetadata(crd.(*apiextensionsv1.CustomResourceDefinition)) var fileName string if i == 0 { fileName = fmt.Sprintf("%s_%s.yaml", crdRaw.Spec.Group, crdRaw.Spec.Names.Plural) } else { fileName = fmt.Sprintf("%s_%s.%s.yaml", crdRaw.Spec.Group, crdRaw.Spec.Names.Plural, crdVersions[i]) } - if err := ctx.WriteYAML(fileName, headerText, []interface{}{crd}, yamlOpts...); err != nil { + if err := ctx.WriteYAML(fileName, headerText, []any{crd}, yamlOpts...); err != nil { return err } } @@ -211,7 +211,7 @@ func (g Generator) Generate(ctx *genall.GenerationContext) error { return nil } -func removeDescriptionFromMetadata(crd *apiext.CustomResourceDefinition) { +func removeDescriptionFromMetadata(crd *apiextensionsv1.CustomResourceDefinition) { for _, versionSpec := range crd.Spec.Versions { if versionSpec.Schema != nil { removeDescriptionFromMetadataProps(versionSpec.Schema.OpenAPIV3Schema) @@ -219,7 +219,7 @@ func removeDescriptionFromMetadata(crd *apiext.CustomResourceDefinition) { } } -func removeDescriptionFromMetadataProps(v *apiext.JSONSchemaProps) { +func removeDescriptionFromMetadataProps(v *apiextensionsv1.JSONSchemaProps) { if m, ok := v.Properties["metadata"]; ok { meta := &m if meta.Description != "" { @@ -230,12 +230,12 @@ func removeDescriptionFromMetadataProps(v *apiext.JSONSchemaProps) { } // FixTopLevelMetadata resets the schema for the top-level metadata field which is needed for CRD validation -func FixTopLevelMetadata(crd apiext.CustomResourceDefinition) { +func FixTopLevelMetadata(crd apiextensionsv1.CustomResourceDefinition) { for _, v := range crd.Spec.Versions { if v.Schema != nil && v.Schema.OpenAPIV3Schema != nil && v.Schema.OpenAPIV3Schema.Properties != nil { schemaProperties := v.Schema.OpenAPIV3Schema.Properties if _, ok := schemaProperties["metadata"]; ok { - schemaProperties["metadata"] = apiext.JSONSchemaProps{Type: "object"} + schemaProperties["metadata"] = apiextensionsv1.JSONSchemaProps{Type: "object"} } } } @@ -243,7 +243,7 @@ func FixTopLevelMetadata(crd apiext.CustomResourceDefinition) { // addAttribution adds attribution info to indicate controller-gen tool was used // to generate this CRD definition along with the version info. 
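The `Generate` loop above converts the canonical CRD once per requested version via `AsVersion`. A sketch of that call in isolation, for the v1 case (where the concrete type assertion below is safe):

```go
package example

import (
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"sigs.k8s.io/controller-tools/pkg/crd"
)

// asServedVersion mirrors the Generate loop: the canonical form is
// converted to the requested output version before being written out.
func asServedVersion(crdRaw apiextensionsv1.CustomResourceDefinition, ver string) (*apiextensionsv1.CustomResourceDefinition, error) {
	obj, err := crd.AsVersion(crdRaw, schema.GroupVersion{
		Group:   apiextensionsv1.SchemeGroupVersion.Group,
		Version: ver,
	})
	if err != nil {
		return nil, err
	}
	// For ver == "v1" the result is the v1 CustomResourceDefinition.
	return obj.(*apiextensionsv1.CustomResourceDefinition), nil
}
```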
-func addAttribution(crd *apiext.CustomResourceDefinition) { +func addAttribution(crd *apiextensionsv1.CustomResourceDefinition) { if crd.ObjectMeta.Annotations == nil { crd.ObjectMeta.Annotations = map[string]string{} } @@ -328,8 +328,8 @@ func FindKubeKinds(parser *Parser, metav1Pkg *loader.Package) []schema.GroupKind for groupKind := range kubeKinds { groupKindList = append(groupKindList, groupKind) } - sort.Slice(groupKindList, func(i, j int) bool { - return groupKindList[i].String() < groupKindList[j].String() + slices.SortStableFunc(groupKindList, func(a, b schema.GroupKind) int { + return strings.Compare(a.String(), b.String()) }) return groupKindList diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go index fd2a663a2..6f8abb665 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go @@ -13,10 +13,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package crd import ( - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "maps" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/utils/ptr" "sigs.k8s.io/controller-tools/pkg/loader" ) @@ -25,35 +28,35 @@ import ( // but don't have validation markers on them (since they're from core Kubernetes). var KnownPackages = map[string]PackageOverride{ "k8s.io/apimachinery/pkg/apis/meta/v1": func(p *Parser, pkg *loader.Package) { - p.Schemata[TypeIdent{Name: "ObjectMeta", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "ObjectMeta", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ Type: "object", } - p.Schemata[TypeIdent{Name: "Time", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "Time", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ Type: "string", Format: "date-time", } - p.Schemata[TypeIdent{Name: "MicroTime", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "MicroTime", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ Type: "string", Format: "date-time", } - p.Schemata[TypeIdent{Name: "Duration", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "Duration", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ // TODO(directxman12): regexp validation for this (or get kube to support it as a format value) Type: "string", } - p.Schemata[TypeIdent{Name: "Fields", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "Fields", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ // this is a recursive structure that can't be flattened or, for that matter, properly generated. 
// so just treat it as an arbitrary map Type: "object", - AdditionalProperties: &apiext.JSONSchemaPropsOrBool{Allows: true}, + AdditionalProperties: &apiextensionsv1.JSONSchemaPropsOrBool{Allows: true}, } p.AddPackage(pkg) // get the rest of the types }, "k8s.io/apimachinery/pkg/api/resource": func(p *Parser, pkg *loader.Package) { - p.Schemata[TypeIdent{Name: "Quantity", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "Quantity", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ // TODO(directxman12): regexp validation for this (or get kube to support it as a format value) XIntOrString: true, - AnyOf: []apiext.JSONSchemaProps{ + AnyOf: []apiextensionsv1.JSONSchemaProps{ {Type: "integer"}, {Type: "string"}, }, @@ -63,7 +66,7 @@ var KnownPackages = map[string]PackageOverride{ }, "k8s.io/apimachinery/pkg/runtime": func(p *Parser, pkg *loader.Package) { - p.Schemata[TypeIdent{Name: "RawExtension", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "RawExtension", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ // TODO(directxman12): regexp validation for this (or get kube to support it as a format value) Type: "object", XPreserveUnknownFields: ptr.To(true), @@ -72,16 +75,16 @@ var KnownPackages = map[string]PackageOverride{ }, "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured": func(p *Parser, pkg *loader.Package) { - p.Schemata[TypeIdent{Name: "Unstructured", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "Unstructured", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ Type: "object", } p.AddPackage(pkg) // get the rest of the types }, "k8s.io/apimachinery/pkg/util/intstr": func(p *Parser, pkg *loader.Package) { - p.Schemata[TypeIdent{Name: "IntOrString", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "IntOrString", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ XIntOrString: true, - AnyOf: []apiext.JSONSchemaProps{ + AnyOf: []apiextensionsv1.JSONSchemaProps{ {Type: "integer"}, {Type: "string"}, }, @@ -90,13 +93,13 @@ var KnownPackages = map[string]PackageOverride{ }, "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1": func(p *Parser, pkg *loader.Package) { - p.Schemata[TypeIdent{Name: "JSON", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "JSON", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ XPreserveUnknownFields: ptr.To(true), } p.AddPackage(pkg) // get the rest of the types }, "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1": func(p *Parser, pkg *loader.Package) { - p.Schemata[TypeIdent{Name: "JSON", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "JSON", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ XPreserveUnknownFields: ptr.To(true), } p.AddPackage(pkg) // get the rest of the types @@ -112,9 +115,9 @@ var ObjectMetaPackages = map[string]PackageOverride{ } // This is an allow-listed set of properties of ObjectMeta, other runtime properties are not part of this list // See more discussion: https://github.com/kubernetes-sigs/controller-tools/pull/395#issuecomment-691919433 - p.Schemata[TypeIdent{Name: "ObjectMeta", Package: pkg}] = apiext.JSONSchemaProps{ + p.Schemata[TypeIdent{Name: "ObjectMeta", Package: pkg}] = apiextensionsv1.JSONSchemaProps{ Type: "object", - Properties: map[string]apiext.JSONSchemaProps{ + Properties: map[string]apiextensionsv1.JSONSchemaProps{ "name": { Type: "string", }, @@ -123,24 +126,24 @@ var ObjectMetaPackages = map[string]PackageOverride{ }, "annotations": { Type: "object", - 
AdditionalProperties: &apiext.JSONSchemaPropsOrBool{ - Schema: &apiext.JSONSchemaProps{ + AdditionalProperties: &apiextensionsv1.JSONSchemaPropsOrBool{ + Schema: &apiextensionsv1.JSONSchemaProps{ Type: "string", }, }, }, "labels": { Type: "object", - AdditionalProperties: &apiext.JSONSchemaPropsOrBool{ - Schema: &apiext.JSONSchemaProps{ + AdditionalProperties: &apiextensionsv1.JSONSchemaPropsOrBool{ + Schema: &apiextensionsv1.JSONSchemaProps{ Type: "string", }, }, }, "finalizers": { Type: "array", - Items: &apiext.JSONSchemaPropsOrArray{ - Schema: &apiext.JSONSchemaProps{ + Items: &apiextensionsv1.JSONSchemaPropsOrArray{ + Schema: &apiextensionsv1.JSONSchemaProps{ Type: "string", }, }, @@ -155,13 +158,9 @@ func AddKnownTypes(parser *Parser) { // ensure everything is there before adding to PackageOverrides // TODO(directxman12): this is a bit of a hack, maybe just use constructors? parser.init() - for pkgName, override := range KnownPackages { - parser.PackageOverrides[pkgName] = override - } + maps.Copy(parser.PackageOverrides, KnownPackages) // if we want to generate the embedded ObjectMeta in the CRD we need to add the ObjectMetaPackages if parser.GenerateEmbeddedObjectMeta { - for pkgName, override := range ObjectMetaPackages { - parser.PackageOverrides[pkgName] = override - } + maps.Copy(parser.PackageOverrides, ObjectMetaPackages) } } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go index bd3cef563..f02e8f7ee 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "sigs.k8s.io/controller-tools/pkg/markers" ) @@ -71,15 +71,15 @@ func init() { // SubresourceStatus enables the "/status" subresource on a CRD. 
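The `AddKnownTypes` rewrite further down this file is behavior-preserving: `maps.Copy` (Go 1.21+) inserts every key/value pair of `src` into `dst`, overwriting on collision, exactly like the removed range-and-assign loops. A self-contained check:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]int{"a": 1}
	src := map[string]int{"a": 9, "b": 2}

	// Same semantics as: for k, v := range src { dst[k] = v }
	maps.Copy(dst, src)

	fmt.Println(dst["a"], dst["b"]) // 9 2
}
```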
type SubresourceStatus struct{} -func (s SubresourceStatus) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { - var subresources *apiext.CustomResourceSubresources +func (s SubresourceStatus) ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinitionSpec, version string) error { + var subresources *apiextensionsv1.CustomResourceSubresources for i := range crd.Versions { ver := &crd.Versions[i] if ver.Name != version { continue } if ver.Subresources == nil { - ver.Subresources = &apiext.CustomResourceSubresources{} + ver.Subresources = &apiextensionsv1.CustomResourceSubresources{} } subresources = ver.Subresources break @@ -87,7 +87,7 @@ func (s SubresourceStatus) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, if subresources == nil { return fmt.Errorf("status subresource applied to version %q not in CRD", version) } - subresources.Status = &apiext.CustomResourceSubresourceStatus{} + subresources.Status = &apiextensionsv1.CustomResourceSubresourceStatus{} return nil } @@ -110,15 +110,15 @@ type SubresourceScale struct { SelectorPath *string `marker:"selectorpath"` } -func (s SubresourceScale) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { - var subresources *apiext.CustomResourceSubresources +func (s SubresourceScale) ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinitionSpec, version string) error { + var subresources *apiextensionsv1.CustomResourceSubresources for i := range crd.Versions { ver := &crd.Versions[i] if ver.Name != version { continue } if ver.Subresources == nil { - ver.Subresources = &apiext.CustomResourceSubresources{} + ver.Subresources = &apiextensionsv1.CustomResourceSubresources{} } subresources = ver.Subresources break @@ -126,7 +126,7 @@ func (s SubresourceScale) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, v if subresources == nil { return fmt.Errorf("scale subresource applied to version %q not in CRD", version) } - subresources.Scale = &apiext.CustomResourceSubresourceScale{ + subresources.Scale = &apiextensionsv1.CustomResourceSubresourceScale{ SpecReplicasPath: s.SpecPath, StatusReplicasPath: s.StatusPath, LabelSelectorPath: s.SelectorPath, @@ -143,7 +143,7 @@ func (s SubresourceScale) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, v // other version will result in conversion to the storage version via a conversion webhook. type StorageVersion struct{} -func (s StorageVersion) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { +func (s StorageVersion) ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinitionSpec, version string) error { if version == "" { // single-version, do nothing return nil @@ -169,12 +169,12 @@ func (s StorageVersion) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, ver // Kubernetes upstream conversion-gen tool. 
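These `ApplyToCRD` implementations back the familiar kubebuilder markers. A hypothetical API type showing the authoring-side syntax they consume:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type WidgetSpec struct {
	Replicas int32 `json:"replicas"`
}

type WidgetStatus struct {
	Replicas int32  `json:"replicas"`
	Selector string `json:"selector,omitempty"`
}

// +kubebuilder:subresource:status
// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   WidgetSpec   `json:"spec,omitempty"`
	Status WidgetStatus `json:"status,omitempty"`
}
```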
type SkipVersion struct{} -func (s SkipVersion) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { +func (s SkipVersion) ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinitionSpec, version string) error { if version == "" { // single-version, this is an invalid state return fmt.Errorf("cannot skip a version if there is only a single version") } - var versions []apiext.CustomResourceDefinitionVersion + var versions []apiextensionsv1.CustomResourceDefinitionVersion // multi-version for i := range crd.Versions { ver := crd.Versions[i] @@ -220,15 +220,15 @@ type PrintColumn struct { Priority int32 `marker:",optional"` } -func (s PrintColumn) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { - var columns *[]apiext.CustomResourceColumnDefinition +func (s PrintColumn) ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinitionSpec, version string) error { + var columns *[]apiextensionsv1.CustomResourceColumnDefinition for i := range crd.Versions { ver := &crd.Versions[i] if ver.Name != version { continue } if ver.Subresources == nil { - ver.Subresources = &apiext.CustomResourceSubresources{} + ver.Subresources = &apiextensionsv1.CustomResourceSubresources{} } columns = &ver.AdditionalPrinterColumns break @@ -237,7 +237,7 @@ func (s PrintColumn) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, versio return fmt.Errorf("printer columns applied to version %q not in CRD", version) } - *columns = append(*columns, apiext.CustomResourceColumnDefinition{ + *columns = append(*columns, apiextensionsv1.CustomResourceColumnDefinition{ Name: s.Name, Type: s.Type, JSONPath: s.JSONPath, @@ -285,7 +285,7 @@ type Resource struct { Scope string `marker:",optional"` } -func (s Resource) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, _ string) error { +func (s Resource) ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinitionSpec, _ string) error { if s.Path != "" { crd.Names.Plural = s.Path } @@ -297,9 +297,9 @@ func (s Resource) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, _ string) switch s.Scope { case "": - crd.Scope = apiext.NamespaceScoped + crd.Scope = apiextensionsv1.NamespaceScoped default: - crd.Scope = apiext.ResourceScope(s.Scope) + crd.Scope = apiextensionsv1.ResourceScope(s.Scope) } return nil @@ -312,7 +312,7 @@ func (s Resource) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, _ string) // This is useful if you need to drop support for a version in favor of a newer version. 
type UnservedVersion struct{} -func (s UnservedVersion) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { +func (s UnservedVersion) ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinitionSpec, version string) error { for i := range crd.Versions { ver := &crd.Versions[i] if ver.Name != version { @@ -334,7 +334,7 @@ type DeprecatedVersion struct { Warning *string `marker:",optional"` } -func (s DeprecatedVersion) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { +func (s DeprecatedVersion) ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinitionSpec, version string) error { if version == "" { // single-version, do nothing return nil @@ -364,7 +364,7 @@ type Metadata struct { Labels []string `marker:",optional"` } -func (s Metadata) ApplyToCRD(crd *apiext.CustomResourceDefinition, _ string) error { +func (s Metadata) ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinition, _ string) error { if len(s.Annotations) > 0 { if crd.Annotations == nil { crd.Annotations = map[string]string{} @@ -399,8 +399,8 @@ type SelectableField struct { JSONPath string `marker:"JSONPath"` } -func (s SelectableField) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { - var selectableFields *[]apiext.SelectableField +func (s SelectableField) ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinitionSpec, version string) error { + var selectableFields *[]apiextensionsv1.SelectableField for i := range crd.Versions { ver := &crd.Versions[i] if ver.Name != version { @@ -413,7 +413,7 @@ func (s SelectableField) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, ve return fmt.Errorf("selectable field applied to version %q not in CRD", version) } - *selectableFields = append(*selectableFields, apiext.SelectableField{ + *selectableFields = append(*selectableFields, apiextensionsv1.SelectableField{ JSONPath: s.JSONPath, }) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go index b5a2760b0..d219689aa 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go @@ -79,7 +79,7 @@ type hasHelp interface { // mustMakeAllWithPrefix converts each object into a marker definition using // the object's type's with the prefix to form the marker name. -func mustMakeAllWithPrefix(prefix string, target markers.TargetType, objs ...interface{}) []*definitionWithHelp { +func mustMakeAllWithPrefix(prefix string, target markers.TargetType, objs ...any) []*definitionWithHelp { defs := make([]*definitionWithHelp, len(objs)) for i, obj := range objs { name := prefix + reflect.TypeOf(obj).Name() diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go index 3a660dfe3..b5e696fc5 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go @@ -19,7 +19,7 @@ package markers import ( "fmt" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "sigs.k8s.io/controller-tools/pkg/markers" ) @@ -117,7 +117,7 @@ type MapType string // Any changes have to replace the entire struct. 
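The topology markers handled below (`listType`, `listMapKey`, `mapType`, `structType`) follow the same `ApplyToSchema` pattern, with the array/object type checks seen in their implementations. Typical usage on a hypothetical spec:

```go
package example

type Container struct {
	Name string `json:"name"`
}

type PodLikeSpec struct {
	// listType=map requires an array and listMapKey a key field,
	// matching the checks ApplyToSchema enforces; elements are then
	// merged per-key by server-side apply.
	// +listType=map
	// +listMapKey=name
	Containers []Container `json:"containers"`

	// +mapType=granular
	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
}
```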
type StructType string -func (l ListType) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (l ListType) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if schema.Type != string(Array) { return fmt.Errorf("must apply listType to an array, found %s", schema.Type) } @@ -133,7 +133,7 @@ func (l ListType) ApplyPriority() ApplyPriority { return ApplyPriorityDefault - 1 } -func (l ListMapKey) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (l ListMapKey) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if schema.Type != string(Array) { return fmt.Errorf("must apply listMapKey to an array, found %s", schema.Type) } @@ -144,7 +144,7 @@ func (l ListMapKey) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m MapType) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m MapType) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if schema.Type != string(Object) { return fmt.Errorf("must apply mapType to an object") } @@ -159,7 +159,7 @@ func (m MapType) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (s StructType) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (s StructType) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if schema.Type != string(Object) && schema.Type != "" { return fmt.Errorf("must apply structType to an object; either explicitly set or defaulted through an empty schema type") } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go index 839b07f6e..8a9e02bb2 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go @@ -22,7 +22,7 @@ import ( "math" "strings" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "sigs.k8s.io/controller-tools/pkg/markers" ) @@ -34,6 +34,7 @@ const ( ValidationExactlyOneOfPrefix = validationPrefix + "ExactlyOneOf" ValidationAtMostOneOfPrefix = validationPrefix + "AtMostOneOf" + ValidationAtLeastOneOfPrefix = validationPrefix + "AtLeastOneOf" ) // ValidationMarkers lists all available markers that affect CRD schema generation, @@ -52,6 +53,9 @@ var ValidationMarkers = mustMakeAllWithPrefix(validationPrefix, markers.Describe ExclusiveMaximum(false), ExclusiveMinimum(false), MultipleOf(0), + + // object markers + MinProperties(0), MaxProperties(0), @@ -61,7 +65,7 @@ var ValidationMarkers = mustMakeAllWithPrefix(validationPrefix, markers.Describe MinLength(0), Pattern(""), - // slice markers + // array markers MaxItems(0), MinItems(0), @@ -84,6 +88,8 @@ var TypeOnlyMarkers = []*definitionWithHelp{ WithHelp(markers.SimpleHelp("CRD validation", "specifies a list of field names that must conform to the AtMostOneOf constraint.")), must(markers.MakeDefinition(ValidationExactlyOneOfPrefix, markers.DescribesType, ExactlyOneOf(nil))). WithHelp(markers.SimpleHelp("CRD validation", "specifies a list of field names that must conform to the ExactlyOneOf constraint.")), + must(markers.MakeDefinition(ValidationAtLeastOneOfPrefix, markers.DescribesType, AtLeastOneOf(nil))). + WithHelp(markers.SimpleHelp("CRD validation", "specifies a list of field names that must conform to the AtLeastOneOf constraint.")), } // FieldOnlyMarkers list field-specific validation markers (i.e. 
those markers that don't make @@ -97,7 +103,10 @@ var FieldOnlyMarkers = []*definitionWithHelp{ WithHelp(markers.SimpleHelp("CRD validation", "specifies that this field is required.")), must(markers.MakeDefinition("optional", markers.DescribesField, struct{}{})). WithHelp(markers.SimpleHelp("CRD validation", "specifies that this field is optional.")), - + must(markers.MakeDefinition("k8s:required", markers.DescribesField, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that this field is required.")), + must(markers.MakeDefinition("k8s:optional", markers.DescribesField, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that this field is optional.")), must(markers.MakeDefinition("nullable", markers.DescribesField, Nullable{})). WithHelp(Nullable{}.Help()), @@ -114,9 +123,6 @@ var FieldOnlyMarkers = []*definitionWithHelp{ must(markers.MakeDefinition(SchemalessName, markers.DescribesField, Schemaless{})). WithHelp(Schemaless{}.Help()), - - must(markers.MakeAnyTypeDefinition("kubebuilder:title", markers.DescribesField, Title{})). - WithHelp(Title{}.Help()), } // ValidationIshMarkers are field-and-type markers that don't fall under the @@ -127,6 +133,11 @@ var ValidationIshMarkers = []*definitionWithHelp{ WithHelp(XPreserveUnknownFields{}.Help()), must(markers.MakeDefinition("kubebuilder:pruning:PreserveUnknownFields", markers.DescribesType, XPreserveUnknownFields{})). WithHelp(XPreserveUnknownFields{}.Help()), + + must(markers.MakeAnyTypeDefinition("kubebuilder:title", markers.DescribesField, Title{})). + WithHelp(Title{}.Help()), + must(markers.MakeAnyTypeDefinition("kubebuilder:title", markers.DescribesType, Title{})). + WithHelp(Title{}.Help()), } func init() { @@ -156,95 +167,94 @@ func init() { AllDefinitions = append(AllDefinitions, ValidationIshMarkers...) } -// +controllertools:marker:generateHelp:category="CRD validation" // Maximum specifies the maximum numeric value that this field can have. +// +controllertools:marker:generateHelp:category="CRD validation" type Maximum float64 func (m Maximum) Value() float64 { return float64(m) } -// +controllertools:marker:generateHelp:category="CRD validation" // Minimum specifies the minimum numeric value that this field can have. Negative numbers are supported. +// +controllertools:marker:generateHelp:category="CRD validation" type Minimum float64 func (m Minimum) Value() float64 { return float64(m) } -// +controllertools:marker:generateHelp:category="CRD validation" // ExclusiveMinimum indicates that the minimum is "up to" but not including that value. +// +controllertools:marker:generateHelp:category="CRD validation" type ExclusiveMinimum bool -// +controllertools:marker:generateHelp:category="CRD validation" // ExclusiveMaximum indicates that the maximum is "up to" but not including that value. +// +controllertools:marker:generateHelp:category="CRD validation" type ExclusiveMaximum bool -// +controllertools:marker:generateHelp:category="CRD validation" // MultipleOf specifies that this field must have a numeric value that's a multiple of this one. +// +controllertools:marker:generateHelp:category="CRD validation" type MultipleOf float64 func (m MultipleOf) Value() float64 { return float64(m) } -// +controllertools:marker:generateHelp:category="CRD validation" // MaxLength specifies the maximum length for this string. 
+// +controllertools:marker:generateHelp:category="CRD validation" type MaxLength int -// +controllertools:marker:generateHelp:category="CRD validation" // MinLength specifies the minimum length for this string. +// +controllertools:marker:generateHelp:category="CRD validation" type MinLength int -// +controllertools:marker:generateHelp:category="CRD validation" // Pattern specifies that this string must match the given regular expression. +// +controllertools:marker:generateHelp:category="CRD validation" type Pattern string -// +controllertools:marker:generateHelp:category="CRD validation" // MaxItems specifies the maximum length for this list. +// +controllertools:marker:generateHelp:category="CRD validation" type MaxItems int -// +controllertools:marker:generateHelp:category="CRD validation" // MinItems specifies the minimum length for this list. +// +controllertools:marker:generateHelp:category="CRD validation" type MinItems int -// +controllertools:marker:generateHelp:category="CRD validation" // UniqueItems specifies that all items in this list must be unique. +// +controllertools:marker:generateHelp:category="CRD validation" type UniqueItems bool -// +controllertools:marker:generateHelp:category="CRD validation" // MaxProperties restricts the number of keys in an object +// +controllertools:marker:generateHelp:category="CRD validation" type MaxProperties int -// +controllertools:marker:generateHelp:category="CRD validation" // MinProperties restricts the number of keys in an object +// +controllertools:marker:generateHelp:category="CRD validation" type MinProperties int -// +controllertools:marker:generateHelp:category="CRD validation" // Enum specifies that this (scalar) field is restricted to the *exact* values specified here. -type Enum []interface{} - // +controllertools:marker:generateHelp:category="CRD validation" +type Enum []any + // Format specifies additional "complex" formatting for this field. // // For example, a date-time field would be marked as "type: string" and // "format: date-time". +// +controllertools:marker:generateHelp:category="CRD validation" type Format string -// +controllertools:marker:generateHelp:category="CRD validation" // Type overrides the type for this field (which defaults to the equivalent of the Go type). // // This generally must be paired with custom serialization. For example, the // metav1.Time field would be marked as "type: string" and "format: date-time". +// +controllertools:marker:generateHelp:category="CRD validation" type Type string -// +controllertools:marker:generateHelp:category="CRD validation" // Nullable marks this field as allowing the "null" value. // // This is often not necessary, but may be helpful with custom serialization. +// +controllertools:marker:generateHelp:category="CRD validation" type Nullable struct{} -// +controllertools:marker:generateHelp:category="CRD validation" // Default sets the default value for this field. // // A default value will be accepted as any value valid for the @@ -253,23 +263,23 @@ type Nullable struct{} // "delete"}`). Defaults should be defined in pruned form, and only best-effort // validation will be performed. Full validation of a default requires // submission of the containing CRD to an apiserver. +// +controllertools:marker:generateHelp:category="CRD validation" type Default struct { - Value interface{} + Value any } -// +controllertools:marker:generateHelp:category="CRD validation" // Title sets the title for this field. 
// // The title is metadata that makes the OpenAPI documentation more user-friendly, // making the schema more understandable when viewed in documentation tools. // It's a metadata field that doesn't affect validation but provides // important context about what the schema represents. +// +controllertools:marker:generateHelp:category="CRD validation" type Title struct { - Value interface{} + Value any } -// +controllertools:marker:generateHelp:category="CRD validation" -// Default sets the default value for this field. +// KubernetesDefault sets the default value for this field. // // A default value will be accepted as any value valid for the field. // Only JSON-formatted values are accepted. `ref(...)` values are ignored. @@ -278,11 +288,11 @@ type Title struct { // "delete"}`). Defaults should be defined in pruned form, and only best-effort // validation will be performed. Full validation of a default requires // submission of the containing CRD to an apiserver. +// +controllertools:marker:generateHelp:category="CRD validation" type KubernetesDefault struct { - Value interface{} + Value any } -// +controllertools:marker:generateHelp:category="CRD validation" // Example sets the example value for this field. // // An example value will be accepted as any value valid for the @@ -291,12 +301,12 @@ type KubernetesDefault struct { // "delete"}`). Examples should be defined in pruned form, and only best-effort // validation will be performed. Full validation of an example requires // submission of the containing CRD to an apiserver. +// +controllertools:marker:generateHelp:category="CRD validation" type Example struct { - Value interface{} + Value any } -// +controllertools:marker:generateHelp:category="CRD processing" -// PreserveUnknownFields stops the apiserver from pruning fields which are not specified. +// XPreserveUnknownFields stops the apiserver from pruning fields which are not specified. // // By default the apiserver drops unknown fields from the request payload // during the decoding step. This marker stops the API server from doing so. @@ -308,26 +318,26 @@ type Example struct { // NB: The kubebuilder:validation:XPreserveUnknownFields variant is deprecated // in favor of the kubebuilder:pruning:PreserveUnknownFields variant. They function // identically. +// +controllertools:marker:generateHelp:category="CRD processing" type XPreserveUnknownFields struct{} -// +controllertools:marker:generateHelp:category="CRD validation" -// EmbeddedResource marks a fields as an embedded resource with apiVersion, kind and metadata fields. +// XEmbeddedResource marks a fields as an embedded resource with apiVersion, kind and metadata fields. // // An embedded resource is a value that has apiVersion, kind and metadata fields. // They are validated implicitly according to the semantics of the currently // running apiserver. It is not necessary to add any additional schema for these // field, yet it is possible. This can be combined with PreserveUnknownFields. +// +controllertools:marker:generateHelp:category="CRD validation" type XEmbeddedResource struct{} -// +controllertools:marker:generateHelp:category="CRD validation" -// IntOrString marks a fields as an IntOrString. +// XIntOrString marks a fields as an IntOrString. // // This is required when applying patterns or other validations to an IntOrString // field. Known information about the type is applied during the collapse phase // and as such is not normally available during marker application. 
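As a consolidated reference for the field markers whose types are declared in this file (now also registered under the `k8s:required`/`k8s:optional` names), a hypothetical spec exercising several of them:

```go
package example

type ThingSpec struct {
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=10
	Replicas int32 `json:"replicas"`

	// +kubebuilder:validation:Pattern=`^[a-z0-9-]+$`
	// +kubebuilder:validation:MaxLength=63
	// +kubebuilder:default="standard"
	// +kubebuilder:example="premium"
	Tier string `json:"tier,omitempty"`
}
```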
+// +controllertools:marker:generateHelp:category="CRD validation" type XIntOrString struct{} -// +controllertools:marker:generateHelp:category="CRD validation" // Schemaless marks a field as being a schemaless object. // // Schemaless objects are not introspected, so you must provide @@ -335,13 +345,14 @@ type XIntOrString struct{} // tag is for embedding fields that hold JSONSchema typed objects. // Because this field disables all type checking, it is recommended // to be used only as a last resort. +// +controllertools:marker:generateHelp:category="CRD validation" type Schemaless struct{} -func hasNumericType(schema *apiext.JSONSchemaProps) bool { +func hasNumericType(schema *apiextensionsv1.JSONSchemaProps) bool { return schema.Type == string(Integer) || schema.Type == string(Number) } -func hasTextualType(schema *apiext.JSONSchemaProps) bool { +func hasTextualType(schema *apiextensionsv1.JSONSchemaProps) bool { return schema.Type == "string" || schema.XIntOrString } @@ -349,12 +360,12 @@ func isIntegral(value float64) bool { return value == math.Trunc(value) && !math.IsNaN(value) && !math.IsInf(value, 0) } -// +controllertools:marker:generateHelp:category="CRD validation" // XValidation marks a field as requiring a value for which a given // expression evaluates to true. // // This marker may be repeated to specify multiple expressions, all of // which must evaluate to true. +// +controllertools:marker:generateHelp:category="CRD validation" type XValidation struct { Rule string Message string `marker:",optional"` @@ -364,19 +375,25 @@ type XValidation struct { OptionalOldSelf *bool `marker:"optionalOldSelf,optional"` } -// +controllertools:marker:generateHelp:category="CRD validation" // AtMostOneOf adds a validation constraint that allows at most one of the specified fields. // // This marker may be repeated to specify multiple AtMostOneOf constraints that are mutually exclusive. +// +controllertools:marker:generateHelp:category="CRD validation" type AtMostOneOf []string -// +controllertools:marker:generateHelp:category="CRD validation" // ExactlyOneOf adds a validation constraint that allows at exactly one of the specified fields. // // This marker may be repeated to specify multiple ExactlyOneOf constraints that are mutually exclusive. +// +controllertools:marker:generateHelp:category="CRD validation" type ExactlyOneOf []string -func (m Maximum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +// AtLeastOneOf adds a validation constraint that allows at least one of the specified fields. +// +// This marker may be repeated to specify multiple AtLeastOneOf constraints that are mutually exclusive. 
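Usage of the new `AtLeastOneOf` marker mirrors `AtMostOneOf`/`ExactlyOneOf`: a semicolon-separated field list applied at the type level. A hypothetical example; per the `ApplyToSchema` hunk further down, the generated CEL compares the shared oneOf count expression with `>= 1`:

```go
package example

// +kubebuilder:validation:AtLeastOneOf=url;secretRef
type EndpointSpec struct {
	URL       *string `json:"url,omitempty"`
	SecretRef *string `json:"secretRef,omitempty"`
}
```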
+// +controllertools:marker:generateHelp:category="CRD validation" +type AtLeastOneOf []string + +func (m Maximum) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if !hasNumericType(schema) { return fmt.Errorf("must apply maximum to a numeric value, found %s", schema.Type) } @@ -390,7 +407,7 @@ func (m Maximum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m Minimum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m Minimum) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if !hasNumericType(schema) { return fmt.Errorf("must apply minimum to a numeric value, found %s", schema.Type) } @@ -404,7 +421,7 @@ func (m Minimum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m ExclusiveMaximum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m ExclusiveMaximum) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if !hasNumericType(schema) { return fmt.Errorf("must apply exclusivemaximum to a numeric value, found %s", schema.Type) } @@ -412,7 +429,7 @@ func (m ExclusiveMaximum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m ExclusiveMinimum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m ExclusiveMinimum) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if !hasNumericType(schema) { return fmt.Errorf("must apply exclusiveminimum to a numeric value, found %s", schema.Type) } @@ -421,7 +438,7 @@ func (m ExclusiveMinimum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m MultipleOf) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m MultipleOf) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if !hasNumericType(schema) { return fmt.Errorf("must apply multipleof to a numeric value, found %s", schema.Type) } @@ -435,7 +452,7 @@ func (m MultipleOf) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m MaxLength) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m MaxLength) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if !hasTextualType(schema) { return fmt.Errorf("must apply maxlength to a textual value, found type %q", schema.Type) } @@ -444,7 +461,7 @@ func (m MaxLength) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m MinLength) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m MinLength) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if !hasTextualType(schema) { return fmt.Errorf("must apply minlength to a textual value, found type %q", schema.Type) } @@ -453,7 +470,7 @@ func (m MinLength) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m Pattern) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m Pattern) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if !hasTextualType(schema) { return fmt.Errorf("must apply pattern to a textual value, found type %q", schema.Type) } @@ -461,7 +478,7 @@ func (m Pattern) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m MaxItems) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m MaxItems) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if schema.Type != string(Array) { return fmt.Errorf("must apply maxitem to an array") } @@ -470,7 +487,7 @@ func (m MaxItems) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m MinItems) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m MinItems) 
ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if schema.Type != string(Array) { return fmt.Errorf("must apply minitems to an array") } @@ -479,7 +496,7 @@ func (m MinItems) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m UniqueItems) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m UniqueItems) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if schema.Type != "array" { return fmt.Errorf("must apply uniqueitems to an array") } @@ -487,7 +504,7 @@ func (m UniqueItems) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m MinProperties) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m MinProperties) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if schema.Type != "object" { return fmt.Errorf("must apply minproperties to an object") } @@ -496,7 +513,7 @@ func (m MinProperties) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m MaxProperties) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m MaxProperties) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if schema.Type != "object" { return fmt.Errorf("must apply maxproperties to an object") } @@ -505,10 +522,10 @@ func (m MaxProperties) ApplyToSchema(schema *apiext.JSONSchemaProps) error { return nil } -func (m Enum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m Enum) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { // TODO(directxman12): this is a bit hacky -- we should // probably support AnyType better + using the schema structure - vals := make([]apiext.JSON, len(m)) + vals := make([]apiextensionsv1.JSON, len(m)) for i, val := range m { // TODO(directxman12): check actual type with schema type? // if we're expecting a string, marshal the string properly... @@ -517,13 +534,13 @@ func (m Enum) ApplyToSchema(schema *apiext.JSONSchemaProps) error { if err != nil { return err } - vals[i] = apiext.JSON{Raw: valMarshalled} + vals[i] = apiextensionsv1.JSON{Raw: valMarshalled} } schema.Enum = vals return nil } -func (m Format) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m Format) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { schema.Format = string(m) return nil } @@ -533,7 +550,7 @@ func (m Format) ApplyToSchema(schema *apiext.JSONSchemaProps) error { // TODO(directxman12): find a less hacky way to do this // (we could preserve ordering of markers, but that feels bad in its own right). 
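`Enum.ApplyToSchema` above JSON-marshals each listed value into an `apiextensionsv1.JSON`; on the authoring side that corresponds to the standard marker:

```go
package example

type PolicySpec struct {
	// Each value ends up JSON-encoded in schema.Enum.
	// +kubebuilder:validation:Enum=Allow;Deny;Audit
	Mode string `json:"mode"`
}
```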
-func (m Type) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m Type) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { schema.Type = string(m) return nil } @@ -542,13 +559,13 @@ func (m Type) ApplyPriority() ApplyPriority { return ApplyPriorityDefault - 1 } -func (m Nullable) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m Nullable) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { schema.Nullable = true return nil } -// Defaults are only valid CRDs created with the v1 API -func (m Default) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +// ApplyToSchema defaults are only valid CRDs created with the v1 API +func (m Default) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { marshalledDefault, err := json.Marshal(m.Value) if err != nil { return err @@ -556,7 +573,7 @@ func (m Default) ApplyToSchema(schema *apiext.JSONSchemaProps) error { if schema.Type == "array" && string(marshalledDefault) == "{}" { marshalledDefault = []byte("[]") } - schema.Default = &apiext.JSON{Raw: marshalledDefault} + schema.Default = &apiextensionsv1.JSON{Raw: marshalledDefault} return nil } @@ -565,7 +582,7 @@ func (m Default) ApplyPriority() ApplyPriority { return 10 } -func (m Title) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m Title) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if m.Value == nil { // only apply to the schema if we have a non-nil title return nil @@ -587,8 +604,8 @@ func (m *KubernetesDefault) ParseMarker(_ string, _ string, restFields string) e return json.Unmarshal([]byte(restFields), &m.Value) } -// Defaults are only valid CRDs created with the v1 API -func (m KubernetesDefault) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +// ApplyToSchema defaults are only valid CRDs created with the v1 API +func (m KubernetesDefault) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if m.Value == nil { // only apply to the schema if we have a non-nil default value return nil @@ -597,7 +614,7 @@ func (m KubernetesDefault) ApplyToSchema(schema *apiext.JSONSchemaProps) error { if err != nil { return err } - schema.Default = &apiext.JSON{Raw: marshalledDefault} + schema.Default = &apiextensionsv1.JSON{Raw: marshalledDefault} return nil } @@ -606,22 +623,22 @@ func (m KubernetesDefault) ApplyPriority() ApplyPriority { return 9 } -func (m Example) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m Example) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { marshalledExample, err := json.Marshal(m.Value) if err != nil { return err } - schema.Example = &apiext.JSON{Raw: marshalledExample} + schema.Example = &apiextensionsv1.JSON{Raw: marshalledExample} return nil } -func (m XPreserveUnknownFields) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m XPreserveUnknownFields) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { defTrue := true schema.XPreserveUnknownFields = &defTrue return nil } -func (m XEmbeddedResource) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m XEmbeddedResource) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { schema.XEmbeddedResource = true return nil } @@ -629,7 +646,7 @@ func (m XEmbeddedResource) ApplyToSchema(schema *apiext.JSONSchemaProps) error { // NB(JoelSpeed): we use this property in other markers here, // which means the "XIntOrString" marker *must* be applied first. 
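The `XValidation` handling in the next hunk appends a `ValidationRule` and rejects any `reason` outside the four `FieldValue*` constants. Authoring-side, a hypothetical type-level rule looks like:

```go
package example

// rule, message and reason map onto the ValidationRule fields that
// XValidation.ApplyToSchema appends to schema.XValidations.
// +kubebuilder:validation:XValidation:rule="self.min <= self.max",message="min must not exceed max",reason="FieldValueInvalid"
type RangeSpec struct {
	Min int32 `json:"min"`
	Max int32 `json:"max"`
}
```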
-func (m XIntOrString) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (m XIntOrString) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { schema.XIntOrString = true return nil } @@ -638,18 +655,18 @@ func (m XIntOrString) ApplyPriority() ApplyPriority { return ApplyPriorityDefault - 1 } -func (m XValidation) ApplyToSchema(schema *apiext.JSONSchemaProps) error { - var reason *apiext.FieldValueErrorReason +func (m XValidation) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { + var reason *apiextensionsv1.FieldValueErrorReason if m.Reason != "" { switch m.Reason { - case string(apiext.FieldValueRequired), string(apiext.FieldValueInvalid), string(apiext.FieldValueForbidden), string(apiext.FieldValueDuplicate): - reason = (*apiext.FieldValueErrorReason)(&m.Reason) + case string(apiextensionsv1.FieldValueRequired), string(apiextensionsv1.FieldValueInvalid), string(apiextensionsv1.FieldValueForbidden), string(apiextensionsv1.FieldValueDuplicate): + reason = (*apiextensionsv1.FieldValueErrorReason)(&m.Reason) default: - return fmt.Errorf("invalid reason %s, valid values are %s, %s, %s and %s", m.Reason, apiext.FieldValueRequired, apiext.FieldValueInvalid, apiext.FieldValueForbidden, apiext.FieldValueDuplicate) + return fmt.Errorf("invalid reason %s, valid values are %s, %s, %s and %s", m.Reason, apiextensionsv1.FieldValueRequired, apiextensionsv1.FieldValueInvalid, apiextensionsv1.FieldValueForbidden, apiextensionsv1.FieldValueDuplicate) } } - schema.XValidations = append(schema.XValidations, apiext.ValidationRule{ + schema.XValidations = append(schema.XValidations, apiextensionsv1.ValidationRule{ Rule: m.Rule, Message: m.Message, MessageExpression: m.MessageExpression, @@ -664,7 +681,7 @@ func (XValidation) ApplyPriority() ApplyPriority { return ApplyPriorityDefault } -func (fields AtMostOneOf) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (fields AtMostOneOf) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if len(fields) == 0 { return nil } @@ -681,7 +698,7 @@ func (AtMostOneOf) ApplyPriority() ApplyPriority { return XValidation{}.ApplyPriority() + 1 } -func (fields ExactlyOneOf) ApplyToSchema(schema *apiext.JSONSchemaProps) error { +func (fields ExactlyOneOf) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { if len(fields) == 0 { return nil } @@ -694,8 +711,25 @@ func (fields ExactlyOneOf) ApplyToSchema(schema *apiext.JSONSchemaProps) error { } func (ExactlyOneOf) ApplyPriority() ApplyPriority { - // explicitly go after XValidation markers so that the ordering is deterministic - return XValidation{}.ApplyPriority() + 1 + // explicitly go after AtMostOneOf markers so that the ordering is deterministic + return AtMostOneOf{}.ApplyPriority() + 1 +} + +func (fields AtLeastOneOf) ApplyToSchema(schema *apiextensionsv1.JSONSchemaProps) error { + if len(fields) == 0 { + return nil + } + rule := fieldsToOneOfCelRuleStr(fields) + xvalidation := XValidation{ + Rule: fmt.Sprintf("%s >= 1", rule), + Message: fmt.Sprintf("at least one of the fields in %v must be set", fields), + } + return xvalidation.ApplyToSchema(schema) +} + +func (AtLeastOneOf) ApplyPriority() ApplyPriority { + // explicitly go after ExactlyOneOf markers so that the ordering is deterministic + return ExactlyOneOf{}.ApplyPriority() + 1 } // fieldsToOneOfCelRuleStr converts a slice of field names to a string representation diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go 
b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go index 1f336df9c..bf650ab23 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go @@ -24,6 +24,17 @@ import ( "sigs.k8s.io/controller-tools/pkg/markers" ) +func (AtLeastOneOf) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD validation", + DetailedHelp: markers.DetailedHelp{ + Summary: "adds a validation constraint that requires at least one of the specified fields to be set.", + Details: "This marker may be repeated to specify multiple AtLeastOneOf constraints that are mutually exclusive.", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + func (AtMostOneOf) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", @@ -142,7 +153,7 @@ func (KubernetesDefault) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "Default sets the default value for this field.", + Summary: "sets the default value for this field.", Details: "A default value will be accepted as any value valid for the field.\nOnly JSON-formatted values are accepted. `ref(...)` values are ignored.\nFormatting for common types include: boolean: `true`, string:\n`\"Cluster\"`, numerical: `1.24`, array: `[1,2]`, object: `{\"policy\":\n\"delete\"}`). Defaults should be defined in pruned form, and only best-effort\nvalidation will be performed. Full validation of a default requires\nsubmission of the containing CRD to an apiserver.", }, FieldHelp: map[string]markers.DetailedHelp{ @@ -544,7 +555,7 @@ func (XEmbeddedResource) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "EmbeddedResource marks a fields as an embedded resource with apiVersion, kind and metadata fields.", + Summary: "marks a field as an embedded resource with apiVersion, kind and metadata fields.", Details: "An embedded resource is a value that has apiVersion, kind and metadata fields.\nThey are validated implicitly according to the semantics of the currently\nrunning apiserver. It is not necessary to add any additional schema for these\nfields, yet it is possible. This can be combined with PreserveUnknownFields.", }, FieldHelp: map[string]markers.DetailedHelp{}, @@ -555,7 +566,7 @@ func (XIntOrString) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD validation", DetailedHelp: markers.DetailedHelp{ - Summary: "IntOrString marks a fields as an IntOrString.", + Summary: "marks a field as an IntOrString.", Details: "This is required when applying patterns or other validations to an IntOrString\nfield. Known information about the type is applied during the collapse phase\nand as such is not normally available during marker application.", }, FieldHelp: map[string]markers.DetailedHelp{}, @@ -566,7 +577,7 @@ func (XPreserveUnknownFields) Help() *markers.DefinitionHelp { return &markers.DefinitionHelp{ Category: "CRD processing", DetailedHelp: markers.DetailedHelp{ - Summary: "PreserveUnknownFields stops the apiserver from pruning fields which are not specified.", + Summary: "stops the apiserver from pruning fields which are not specified.", Details: "By default the apiserver drops unknown fields from the request payload\nduring the decoding step.
This marker stops the API server from doing so.\nIt affects fields recursively, but switches back to normal pruning behaviour\nif nested properties or additionalProperties are specified in the schema.\nThis can either be true or undefined. False\nis forbidden.\n\nNB: The kubebuilder:validation:XPreserveUnknownFields variant is deprecated\nin favor of the kubebuilder:pruning:PreserveUnknownFields variant. They function\nidentically.", }, FieldHelp: map[string]markers.DetailedHelp{}, diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go index 3281f24a1..fb7c62d53 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go @@ -19,7 +19,7 @@ package crd import ( "fmt" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-tools/pkg/internal/crd" "sigs.k8s.io/controller-tools/pkg/loader" @@ -52,15 +52,15 @@ type Parser struct { // Types contains the known TypeInfo for this parser. Types map[TypeIdent]*markers.TypeInfo // Schemata contains the known OpenAPI JSONSchemata for this parser. - Schemata map[TypeIdent]apiext.JSONSchemaProps + Schemata map[TypeIdent]apiextensionsv1.JSONSchemaProps // GroupVersions contains the known group-versions of each package in this parser. GroupVersions map[*loader.Package]schema.GroupVersion // CustomResourceDefinitions contains the known CustomResourceDefinitions for types in this parser. - CustomResourceDefinitions map[schema.GroupKind]apiext.CustomResourceDefinition + CustomResourceDefinitions map[schema.GroupKind]apiextensionsv1.CustomResourceDefinition // FlattenedSchemata contains fully flattened schemata for use in building // CustomResourceDefinition validation. Each schema has been flattened by the flattener, // and then embedded fields have been flattened with FlattenEmbedded. - FlattenedSchemata map[TypeIdent]apiext.JSONSchemaProps + FlattenedSchemata map[TypeIdent]apiextensionsv1.JSONSchemaProps // PackageOverrides indicates that the loading of any package with // the given path should be handled by the given overrider. 
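For context on the new `AtLeastOneOf` marker documented above: like `AtMostOneOf` and `ExactlyOneOf`, it expands into a CEL `XValidation` rule, and the `structToSchema` changes further down reject member fields that are simultaneously marked required. A sketch, assuming the marker spelling implied by `ValidationAtLeastOneOfPrefix`; the type and field names are invented:

```go
// Hypothetical spec type. Each marker below becomes one
// x-kubernetes-validations CEL rule on the generated schema; member
// fields must stay optional (with omitempty) or generation errors out.
// +kubebuilder:validation:AtLeastOneOf=endpoint;service
// +kubebuilder:validation:AtMostOneOf=tlsSecretRef;caBundle
type BackendSpec struct {
	// +optional
	Endpoint string `json:"endpoint,omitempty"`
	// +optional
	Service string `json:"service,omitempty"`
	// +optional
	TLSSecretRef string `json:"tlsSecretRef,omitempty"`
	// +optional
	CABundle string `json:"caBundle,omitempty"`
}
```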
@@ -104,7 +104,7 @@ func (p *Parser) init() { } } if p.Schemata == nil { - p.Schemata = make(map[TypeIdent]apiext.JSONSchemaProps) + p.Schemata = make(map[TypeIdent]apiextensionsv1.JSONSchemaProps) } if p.Types == nil { p.Types = make(map[TypeIdent]*markers.TypeInfo) @@ -116,10 +116,10 @@ func (p *Parser) init() { p.GroupVersions = make(map[*loader.Package]schema.GroupVersion) } if p.CustomResourceDefinitions == nil { - p.CustomResourceDefinitions = make(map[schema.GroupKind]apiext.CustomResourceDefinition) + p.CustomResourceDefinitions = make(map[schema.GroupKind]apiextensionsv1.CustomResourceDefinition) } if p.FlattenedSchemata == nil { - p.FlattenedSchemata = make(map[TypeIdent]apiext.JSONSchemaProps) + p.FlattenedSchemata = make(map[TypeIdent]apiextensionsv1.JSONSchemaProps) } } @@ -170,7 +170,7 @@ func (p *Parser) NeedSchemaFor(typ TypeIdent) { } // avoid tripping recursive schemata, like ManagedFields, by adding an empty WIP schema - p.Schemata[typ] = apiext.JSONSchemaProps{} + p.Schemata[typ] = apiextensionsv1.JSONSchemaProps{} schemaCtx := newSchemaContext(typ.Package, p, p.AllowDangerousTypes, p.IgnoreUnexportedFields) ctxForInfo := schemaCtx.ForInfo(info) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go index efb09b7c9..cd923abe2 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go @@ -22,10 +22,10 @@ import ( "go/ast" "go/token" "go/types" - "sort" + "slices" "strings" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/util/sets" crdmarkers "sigs.k8s.io/controller-tools/pkg/crd/markers" "sigs.k8s.io/controller-tools/pkg/loader" @@ -49,7 +49,7 @@ var byteType = types.Universe.Lookup("byte").Type() type SchemaMarker interface { // ApplyToSchema is called after the rest of the schema for a given type // or field is generated, to modify the schema appropriately. - ApplyToSchema(*apiext.JSONSchemaProps) error + ApplyToSchema(*apiextensionsv1.JSONSchemaProps) error } // applyFirstMarker is applied before any other markers. It's a bit of a hack. @@ -60,6 +60,7 @@ type applyFirstMarker interface { // schemaRequester knows how to marker that another schema (e.g. via an external reference) is necessary. type schemaRequester interface { NeedSchemaFor(typ TypeIdent) + LookupType(pkg *loader.Package, name string) *markers.TypeInfo } // schemaContext stores and provides information across a hierarchy of schema generation. @@ -112,14 +113,14 @@ func (c *schemaContext) requestSchema(pkgPath, typeName string) { } // infoToSchema creates a schema for the type in the given set of type information. -func infoToSchema(ctx *schemaContext) *apiext.JSONSchemaProps { +func infoToSchema(ctx *schemaContext) *apiextensionsv1.JSONSchemaProps { if obj := ctx.pkg.Types.Scope().Lookup(ctx.info.Name); obj != nil { switch { // If the obj implements a JSON marshaler and has a marker, use the // markers value and do not traverse as the marshaler could be doing // anything. If there is no marker, fall back to traversing. 
case implements(obj.Type(), jsonMarshaler): - schema := &apiext.JSONSchemaProps{} + schema := &apiextensionsv1.JSONSchemaProps{} applyMarkers(ctx, ctx.info.Markers, schema, ctx.info.RawSpec.Type) if schema.Type != "" { return schema @@ -127,7 +128,7 @@ func infoToSchema(ctx *schemaContext) *apiext.JSONSchemaProps { // If the obj implements a text marshaler, encode it as a string. case implements(obj.Type(), textMarshaler): - schema := &apiext.JSONSchemaProps{Type: "string"} + schema := &apiextensionsv1.JSONSchemaProps{Type: "string"} applyMarkers(ctx, ctx.info.Markers, schema, ctx.info.RawSpec.Type) if schema.Type != "string" { err := fmt.Errorf("%q implements encoding.TextMarshaler but schema type is not string: %q", ctx.info.RawSpec.Name, schema.Type) @@ -145,7 +146,7 @@ type schemaMarkerWithName struct { } // applyMarkers applies schema markers given their priority to the given schema -func applyMarkers(ctx *schemaContext, markerSet markers.MarkerValues, props *apiext.JSONSchemaProps, node ast.Node) { +func applyMarkers(ctx *schemaContext, markerSet markers.MarkerValues, props *apiextensionsv1.JSONSchemaProps, node ast.Node) { markers := make([]schemaMarkerWithName, 0, len(markerSet)) itemsMarkers := make([]schemaMarkerWithName, 0, len(markerSet)) @@ -167,10 +168,10 @@ func applyMarkers(ctx *schemaContext, markerSet markers.MarkerValues, props *api } } - cmpPriority := func(markers []schemaMarkerWithName, i, j int) bool { + cmpPriority := func(i, j schemaMarkerWithName) int { var iPriority, jPriority crdmarkers.ApplyPriority - switch m := markers[i].SchemaMarker.(type) { + switch m := i.SchemaMarker.(type) { case crdmarkers.ApplyPriorityMarker: iPriority = m.ApplyPriority() case applyFirstMarker: @@ -179,7 +180,7 @@ func applyMarkers(ctx *schemaContext, markerSet markers.MarkerValues, props *api iPriority = crdmarkers.ApplyPriorityDefault } - switch m := markers[j].SchemaMarker.(type) { + switch m := j.SchemaMarker.(type) { case crdmarkers.ApplyPriorityMarker: jPriority = m.ApplyPriority() case applyFirstMarker: @@ -188,10 +189,10 @@ func applyMarkers(ctx *schemaContext, markerSet markers.MarkerValues, props *api jPriority = crdmarkers.ApplyPriorityDefault } - return iPriority < jPriority + return int(iPriority - jPriority) } - sort.Slice(markers, func(i, j int) bool { return cmpPriority(markers, i, j) }) - sort.Slice(itemsMarkers, func(i, j int) bool { return cmpPriority(itemsMarkers, i, j) }) + slices.SortStableFunc(markers, func(i, j schemaMarkerWithName) int { return cmpPriority(i, j) }) + slices.SortStableFunc(itemsMarkers, func(i, j schemaMarkerWithName) int { return cmpPriority(i, j) }) for _, schemaMarker := range markers { if err := schemaMarker.SchemaMarker.ApplyToSchema(props); err != nil { @@ -213,8 +214,8 @@ func applyMarkers(ctx *schemaContext, markerSet markers.MarkerValues, props *api } // typeToSchema creates a schema for the given AST type. 
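On the `textMarshaler` branch above: a type implementing `encoding.TextMarshaler` is emitted as a plain `"string"` schema, and a marker that moves the type away from string is reported as an error. A self-contained sketch of such a type (not taken from this diff):

```go
package v1alpha1

import "net/netip"

// IPRange serializes as "from-to" text, so controller-gen's textMarshaler
// branch gives it {type: "string"} instead of traversing the struct fields.
type IPRange struct {
	From netip.Addr
	To   netip.Addr
}

// MarshalText makes IPRange implement encoding.TextMarshaler.
func (r IPRange) MarshalText() ([]byte, error) {
	return []byte(r.From.String() + "-" + r.To.String()), nil
}
```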
-func typeToSchema(ctx *schemaContext, rawType ast.Expr) *apiext.JSONSchemaProps { - var props *apiext.JSONSchemaProps +func typeToSchema(ctx *schemaContext, rawType ast.Expr) *apiextensionsv1.JSONSchemaProps { + var props *apiextensionsv1.JSONSchemaProps switch expr := rawType.(type) { case *ast.Ident: props = localNamedToSchema(ctx, expr) @@ -225,13 +226,13 @@ func typeToSchema(ctx *schemaContext, rawType ast.Expr) *apiext.JSONSchemaProps case *ast.MapType: props = mapToSchema(ctx, expr) case *ast.StarExpr: - props = typeToSchema(ctx, expr.X) + props = typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), expr.X) case *ast.StructType: props = structToSchema(ctx, expr) default: ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unsupported AST kind %T", expr), rawType)) // NB(directxman12): we explicitly don't handle interfaces - return &apiext.JSONSchemaProps{} + return &apiextensionsv1.JSONSchemaProps{} } props.Description = ctx.info.Doc @@ -259,11 +260,11 @@ func TypeRefLink(pkgName, typeName string) string { // localNamedToSchema creates a schema (ref) for a *potentially* local type reference // (could be external from a dot-import). -func localNamedToSchema(ctx *schemaContext, ident *ast.Ident) *apiext.JSONSchemaProps { +func localNamedToSchema(ctx *schemaContext, ident *ast.Ident) *apiextensionsv1.JSONSchemaProps { typeInfo := ctx.pkg.TypesInfo.TypeOf(ident) if typeInfo == types.Typ[types.Invalid] { ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %s", ident.Name), ident)) - return &apiext.JSONSchemaProps{} + return &apiextensionsv1.JSONSchemaProps{} } // This reproduces the behavior we had pre gotypesalias=1 (needed if this // project is compiled with default settings and Go >= 1.23). @@ -283,13 +284,13 @@ func localNamedToSchema(ctx *schemaContext, ident *ast.Ident) *apiext.JSONSchema if basicInfo.Name() != ident.Name { ctx.requestSchema("", ident.Name) link := TypeRefLink("", ident.Name) - return &apiext.JSONSchemaProps{ + return &apiextensionsv1.JSONSchemaProps{ Type: typ, Format: fmt, Ref: &link, } } - return &apiext.JSONSchemaProps{ + return &apiextensionsv1.JSONSchemaProps{ Type: typ, Format: fmt, } @@ -304,24 +305,42 @@ func localNamedToSchema(ctx *schemaContext, ident *ast.Ident) *apiext.JSONSchema } ctx.requestSchema(pkgPath, typeNameInfo.Name()) link := TypeRefLink(pkgPath, typeNameInfo.Name()) - return &apiext.JSONSchemaProps{ - Ref: &link, + + // In cases where we have a named type, apply the type and format from the named schema + // to allow markers that need this information to apply correctly. + var typ, fmt string + if namedInfo, isNamed := typeInfo.(*types.Named); isNamed { + // We don't want/need to do this for structs, maps, or arrays. + // These are already handled in infoToSchema if they have custom marshalling. + if _, isBasic := namedInfo.Underlying().(*types.Basic); isBasic { + namedTypeInfo := ctx.schemaRequester.LookupType(ctx.pkg, namedInfo.Obj().Name()) + + namedSchema := infoToSchema(ctx.ForInfo(namedTypeInfo)) + typ = namedSchema.Type + fmt = namedSchema.Format + } + } + + return &apiextensionsv1.JSONSchemaProps{ + Type: typ, + Format: fmt, + Ref: &link, } } // namedToSchema creates a schema (ref) for an explicitly external type reference.
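The new branch in `localNamedToSchema` above is the substantive change in this file: a reference to a named type whose underlying type is basic now carries `Type` and `Format` next to the `$ref`, via the new `LookupType` hook on `schemaRequester`. A sketch of the shape that benefits, with invented names:

```go
// Mode has a basic (string) underlying type, so with the change above a
// field referencing it now gets Type: "string" alongside the $ref link,
// letting type-sensitive markers on the field apply correctly.
// +kubebuilder:validation:Enum=Auto;Manual
type Mode string

type TuningSpec struct {
	// +optional
	Mode Mode `json:"mode,omitempty"`
}
```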
-func namedToSchema(ctx *schemaContext, named *ast.SelectorExpr) *apiext.JSONSchemaProps { +func namedToSchema(ctx *schemaContext, named *ast.SelectorExpr) *apiextensionsv1.JSONSchemaProps { typeInfoRaw := ctx.pkg.TypesInfo.TypeOf(named) if typeInfoRaw == types.Typ[types.Invalid] { ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %v.%s", named.X, named.Sel.Name), named)) - return &apiext.JSONSchemaProps{} + return &apiextensionsv1.JSONSchemaProps{} } typeInfo := typeInfoRaw.(interface{ Obj() *types.TypeName }) typeNameInfo := typeInfo.Obj() nonVendorPath := loader.NonVendorPath(typeNameInfo.Pkg().Path()) ctx.requestSchema(nonVendorPath, typeNameInfo.Name()) link := TypeRefLink(nonVendorPath, typeNameInfo.Name()) - return &apiext.JSONSchemaProps{ + return &apiextensionsv1.JSONSchemaProps{ Ref: &link, } // NB(directxman12): we special-case things like resource.Quantity during the "collapse" phase. @@ -329,12 +348,12 @@ func namedToSchema(ctx *schemaContext, named *ast.SelectorExpr) *apiext.JSONSche // arrayToSchema creates a schema for the items of the given array, dealing appropriately // with the special `[]byte` type (according to OpenAPI standards). -func arrayToSchema(ctx *schemaContext, array *ast.ArrayType) *apiext.JSONSchemaProps { +func arrayToSchema(ctx *schemaContext, array *ast.ArrayType) *apiextensionsv1.JSONSchemaProps { eltType := ctx.pkg.TypesInfo.TypeOf(array.Elt) if eltType == byteType && array.Len == nil { // byte slices are represented as base64-encoded strings // (the format is defined in OpenAPI v3, but not JSON Schema) - return &apiext.JSONSchemaProps{ + return &apiextensionsv1.JSONSchemaProps{ Type: "string", Format: "byte", } @@ -342,15 +361,15 @@ func arrayToSchema(ctx *schemaContext, array *ast.ArrayType) *apiext.JSONSchemaP // TODO(directxman12): backwards-compat would require access to markers from base info items := typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), array.Elt) - return &apiext.JSONSchemaProps{ + return &apiextensionsv1.JSONSchemaProps{ Type: "array", - Items: &apiext.JSONSchemaPropsOrArray{Schema: items}, + Items: &apiextensionsv1.JSONSchemaPropsOrArray{Schema: items}, } } // mapToSchema creates a schema for items of the given map. Key types must eventually resolve // to string (other types aren't allowed by JSON, and thus the kubernetes API standards). 
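As the `arrayToSchema` hunk above preserves, `[]byte` keeps its OpenAPI special case while ordinary slices get an items subschema. A small illustrative sketch (field names invented):

```go
// Hypothetical type: Data takes the []byte branch and becomes
// {type: "string", format: "byte"} (base64-encoded); Tags becomes
// {type: "array", items: {type: "string"}}.
type BlobSpec struct {
	// +optional
	Data []byte `json:"data,omitempty"`
	// +optional
	Tags []string `json:"tags,omitempty"`
}
```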
-func mapToSchema(ctx *schemaContext, mapType *ast.MapType) *apiext.JSONSchemaProps { +func mapToSchema(ctx *schemaContext, mapType *ast.MapType) *apiextensionsv1.JSONSchemaProps { keyInfo := ctx.pkg.TypesInfo.TypeOf(mapType.Key) // check that we've got a type that actually corresponds to a string for keyInfo != nil { @@ -358,19 +377,19 @@ func mapToSchema(ctx *schemaContext, mapType *ast.MapType) *apiext.JSONSchemaPro case *types.Basic: if typedKey.Info()&types.IsString == 0 { ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map keys must be strings, not %s", keyInfo.String()), mapType.Key)) - return &apiext.JSONSchemaProps{} + return &apiextensionsv1.JSONSchemaProps{} } keyInfo = nil // stop iterating case *types.Named: keyInfo = typedKey.Underlying() default: ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map keys must be strings, not %s", keyInfo.String()), mapType.Key)) - return &apiext.JSONSchemaProps{} + return &apiextensionsv1.JSONSchemaProps{} } } // TODO(directxman12): backwards-compat would require access to markers from base info - var valSchema *apiext.JSONSchemaProps + var valSchema *apiextensionsv1.JSONSchemaProps switch val := mapType.Value.(type) { case *ast.Ident: valSchema = localNamedToSchema(ctx.ForInfo(&markers.TypeInfo{}), val) @@ -384,12 +403,12 @@ func mapToSchema(ctx *schemaContext, mapType *ast.MapType) *apiext.JSONSchemaPro valSchema = typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), val) default: ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("not a supported map value type: %T", mapType.Value), mapType.Value)) - return &apiext.JSONSchemaProps{} + return &apiextensionsv1.JSONSchemaProps{} } - return &apiext.JSONSchemaProps{ + return &apiextensionsv1.JSONSchemaProps{ Type: "object", - AdditionalProperties: &apiext.JSONSchemaPropsOrBool{ + AdditionalProperties: &apiextensionsv1.JSONSchemaPropsOrBool{ Schema: valSchema, Allows: true, /* set automatically by serialization, but useful for testing */ }, @@ -400,10 +419,10 @@ func mapToSchema(ctx *schemaContext, mapType *ast.MapType) *apiext.JSONSchemaPro // and can be flattened later with a Flattener. 
// //nolint:gocyclo -func structToSchema(ctx *schemaContext, structType *ast.StructType) *apiext.JSONSchemaProps { - props := &apiext.JSONSchemaProps{ +func structToSchema(ctx *schemaContext, structType *ast.StructType) *apiextensionsv1.JSONSchemaProps { + props := &apiextensionsv1.JSONSchemaProps{ Type: "object", - Properties: make(map[string]apiext.JSONSchemaProps), + Properties: make(map[string]apiextensionsv1.JSONSchemaProps), } if ctx.info.RawSpec.Type != structType { @@ -421,6 +440,11 @@ func structToSchema(ctx *schemaContext, structType *ast.StructType) *apiext.JSON ctx.pkg.AddError(loader.ErrFromNode(err, structType)) return props } + atLeastOneOf, err := oneOfValuesToSet(ctx.info.Markers[crdmarkers.ValidationAtLeastOneOfPrefix]) + if err != nil { + ctx.pkg.AddError(loader.ErrFromNode(err, structType)) + return props + } for _, field := range ctx.info.Fields { // Skip if the field is not an inline field, ignoreUnexportedFields is true, and the field is not exported @@ -463,16 +487,16 @@ func structToSchema(ctx *schemaContext, structType *ast.StructType) *apiext.JSON case field.Markers.Get("kubebuilder:validation:Optional") != nil: // explicitly optional - kubebuilder case field.Markers.Get("kubebuilder:validation:Required") != nil: - if exactlyOneOf.Has(fieldName) || atMostOneOf.Has(fieldName) { + if exactlyOneOf.Has(fieldName) || atMostOneOf.Has(fieldName) || atLeastOneOf.Has(fieldName) { ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("field %s is part of OneOf constraint and cannot be marked as required", fieldName), structType)) return props } // explicitly required - kubebuilder props.Required = append(props.Required, fieldName) - case field.Markers.Get("optional") != nil: + case field.Markers.Get("optional") != nil, field.Markers.Get("k8s:optional") != nil: // explicitly optional - kubernetes - case field.Markers.Get("required") != nil: - if exactlyOneOf.Has(fieldName) || atMostOneOf.Has(fieldName) { + case field.Markers.Get("required") != nil, field.Markers.Get("k8s:required") != nil: + if exactlyOneOf.Has(fieldName) || atMostOneOf.Has(fieldName) || atLeastOneOf.Has(fieldName) { ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("field %s is part of OneOf constraint and cannot be marked as required", fieldName), structType)) return props } @@ -483,7 +507,7 @@ func structToSchema(ctx *schemaContext, structType *ast.StructType) *apiext.JSON case defaultMode == "required": // ...everything that's not inline / omitempty is required if !inline && !omitEmpty { - if exactlyOneOf.Has(fieldName) || atMostOneOf.Has(fieldName) { + if exactlyOneOf.Has(fieldName) || atMostOneOf.Has(fieldName) || atLeastOneOf.Has(fieldName) { ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("field %s is part of OneOf constraint and must have omitempty tag", fieldName), structType)) return props } @@ -495,9 +519,9 @@ func structToSchema(ctx *schemaContext, structType *ast.StructType) *apiext.JSON // implicitly optional } - var propSchema *apiext.JSONSchemaProps + var propSchema *apiextensionsv1.JSONSchemaProps if field.Markers.Get(crdmarkers.SchemalessName) != nil { - propSchema = &apiext.JSONSchemaProps{} + propSchema = &apiextensionsv1.JSONSchemaProps{} } else { propSchema = typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), field.RawField.Type) } @@ -513,6 +537,9 @@ func structToSchema(ctx *schemaContext, structType *ast.StructType) *apiext.JSON props.Properties[fieldName] = *propSchema } + // Ensure the required fields are always listed alphabetically. 
+ slices.Sort(props.Required) + return props } @@ -530,6 +557,11 @@ func oneOfValuesToSet(oneOfGroups []any) (sets.Set[string], error) { return nil, fmt.Errorf("%s: %w", crdmarkers.ValidationAtMostOneOfPrefix, err) } set.Insert(vals...) + case crdmarkers.AtLeastOneOf: + if err := validateOneOfValues(vals...); err != nil { + return nil, fmt.Errorf("%s: %w", crdmarkers.ValidationAtLeastOneOfPrefix, err) + } + set.Insert(vals...) default: return nil, fmt.Errorf("expected ExactlyOneOf or AtMostOneOf, got %T", oneOf) } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go index 2604d739b..bb6cb61bd 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go @@ -17,7 +17,7 @@ limitations under the License. package crd import ( - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) // SchemaVisitor walks the nodes of a schema. @@ -31,13 +31,13 @@ type SchemaVisitor interface { // It is *NOT* safe to save references to the given schema. // Make deepcopies if you need to keep things around beyond // the lifetime of the call. - Visit(schema *apiext.JSONSchemaProps) SchemaVisitor + Visit(schema *apiextensionsv1.JSONSchemaProps) SchemaVisitor } // EditSchema walks the given schema using the given visitor. Actual // pointers to each schema node are passed to the visitor, so any changes // made by the visitor will be reflected to the passed-in schema. -func EditSchema(schema *apiext.JSONSchemaProps, visitor SchemaVisitor) { +func EditSchema(schema *apiextensionsv1.JSONSchemaProps, visitor SchemaVisitor) { walker := schemaWalker{visitor: visitor} walker.walkSchema(schema) } @@ -55,7 +55,7 @@ type schemaWalker struct { // visitor will be used to visit all "children" of the current schema, followed // by a nil schema with the returned visitor to mark completion. If a nil visitor // is returned, traversal will not continue into the children of the current schema. -func (w schemaWalker) walkSchema(schema *apiext.JSONSchemaProps) { +func (w schemaWalker) walkSchema(schema *apiextensionsv1.JSONSchemaProps) { // Walk a potential chain of schema references, keeping track of seen // references to avoid circular references subVisitor := w.visitor @@ -104,7 +104,7 @@ func (w schemaWalker) walkSchema(schema *apiext.JSONSchemaProps) { } // walkMap walks over values of the given map, saving changes to them. -func (w schemaWalker) walkMap(defs map[string]apiext.JSONSchemaProps) { +func (w schemaWalker) walkMap(defs map[string]apiextensionsv1.JSONSchemaProps) { for name, def := range defs { // this iter var reference is safe because we immediately preserve it below //nolint:gosec def := def w.walkSchema(&def) // make sure the edits actually go through since we can't // take a reference to the value in the map defs[name] = def } } @@ -116,14 +116,14 @@ // walkSlice walks over items of the given slice. -func (w schemaWalker) walkSlice(defs []apiext.JSONSchemaProps) { +func (w schemaWalker) walkSlice(defs []apiextensionsv1.JSONSchemaProps) { for i := range defs { w.walkSchema(&defs[i]) } } // walkPtr walks over the contents of the given pointer, if it's not nil.
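The `SchemaVisitor`/`EditSchema` pair renamed above is the public hook for post-processing generated schemata. A minimal sketch of a visitor against the signatures in this hunk; the visitor itself is hypothetical. Note that `Visit` is also invoked with a nil schema to mark completion of a node, hence the guard:

```go
package schemautil

import (
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"sigs.k8s.io/controller-tools/pkg/crd"
)

// dropDescriptions blanks every description in place. Returning the visitor
// recurses into children; returning nil would stop traversal (see walkSchema).
type dropDescriptions struct{}

func (d dropDescriptions) Visit(schema *apiextensionsv1.JSONSchemaProps) crd.SchemaVisitor {
	if schema != nil { // Visit(nil) signals that a node's children are done
		schema.Description = ""
	}
	return d
}

// StripDescriptions removes all descriptions from the schema tree in place.
func StripDescriptions(root *apiextensionsv1.JSONSchemaProps) {
	crd.EditSchema(root, dropDescriptions{})
}
```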
-func (w schemaWalker) walkPtr(def *apiext.JSONSchemaProps) { +func (w schemaWalker) walkPtr(def *apiextensionsv1.JSONSchemaProps) { if def == nil { return } diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go index e4cbe3f73..cc6afe126 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go @@ -13,15 +13,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ + package crd import ( "fmt" - "sort" + "slices" "strings" "github.com/gobuffalo/flect" - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-tools/pkg/loader" @@ -32,7 +33,7 @@ import ( type SpecMarker interface { // ApplyToCRD applies this marker to the given CRD, in the given version // within that CRD. It's called after everything else in the CRD is populated. - ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error + ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinitionSpec, version string) error } // Marker is a marker that knows how to apply itself to a particular @@ -40,7 +41,7 @@ type SpecMarker interface { type Marker interface { // ApplyToCRD applies this marker to the given CRD, in the given version // within that CRD. It's called after everything else in the CRD is populated. - ApplyToCRD(crd *apiext.CustomResourceDefinition, version string) error + ApplyToCRD(crd *apiextensionsv1.CustomResourceDefinition, version string) error } // NeedCRDFor requests the full CRD for the given group-kind. It requires @@ -62,23 +63,23 @@ func (p *Parser) NeedCRDFor(groupKind schema.GroupKind, maxDescLen *int) { } defaultPlural := strings.ToLower(flect.Pluralize(groupKind.Kind)) - crd := apiext.CustomResourceDefinition{ + crd := apiextensionsv1.CustomResourceDefinition{ TypeMeta: metav1.TypeMeta{ - APIVersion: apiext.SchemeGroupVersion.String(), + APIVersion: apiextensionsv1.SchemeGroupVersion.String(), Kind: "CustomResourceDefinition", }, ObjectMeta: metav1.ObjectMeta{ Name: defaultPlural + "." + groupKind.Group, }, - Spec: apiext.CustomResourceDefinitionSpec{ + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ Group: groupKind.Group, - Names: apiext.CustomResourceDefinitionNames{ + Names: apiextensionsv1.CustomResourceDefinitionNames{ Kind: groupKind.Kind, ListKind: groupKind.Kind + "List", Plural: defaultPlural, Singular: strings.ToLower(groupKind.Kind), }, - Scope: apiext.NamespaceScoped, + Scope: apiextensionsv1.NamespaceScoped, }, } @@ -94,10 +95,10 @@ func (p *Parser) NeedCRDFor(groupKind schema.GroupKind, maxDescLen *int) { if maxDescLen != nil { TruncateDescription(&fullSchema, *maxDescLen) } - ver := apiext.CustomResourceDefinitionVersion{ + ver := apiextensionsv1.CustomResourceDefinitionVersion{ Name: p.GroupVersions[pkg].Version, Served: true, - Schema: &apiext.CustomResourceValidation{ + Schema: &apiextensionsv1.CustomResourceValidation{ OpenAPIV3Schema: &fullSchema, // fine to take a reference since we deepcopy above }, } @@ -138,7 +139,7 @@ func (p *Parser) NeedCRDFor(groupKind schema.GroupKind, maxDescLen *int) { // it is necessary to make sure the order of CRD versions in crd.Spec.Versions is stable and explicitly set crd.Spec.Version. 
// Otherwise, crd.Spec.Version may point to different CRD versions across different runs. - sort.Slice(crd.Spec.Versions, func(i, j int) bool { return crd.Spec.Versions[i].Name < crd.Spec.Versions[j].Name }) + slices.SortStableFunc(crd.Spec.Versions, func(a, b apiextensionsv1.CustomResourceDefinitionVersion) int { return strings.Compare(a.Name, b.Name) }) // make sure we have *a* storage version // (default it if we only have one, otherwise, bail) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go index b6f37409b..85333f88d 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go @@ -121,19 +121,19 @@ type GenerationContext struct { // WriteYAMLOptions implements the Options Pattern for WriteYAML. type WriteYAMLOptions struct { - transform func(obj map[string]interface{}) error + transform func(obj map[string]any) error } // WithTransform applies a transformation to objects just before writing them. -func WithTransform(transform func(obj map[string]interface{}) error) *WriteYAMLOptions { +func WithTransform(transform func(obj map[string]any) error) *WriteYAMLOptions { return &WriteYAMLOptions{ transform: transform, } } // TransformRemoveCreationTimestamp ensures we do not write the metadata.creationTimestamp field. -func TransformRemoveCreationTimestamp(obj map[string]interface{}) error { - metadata := obj["metadata"].(map[interface{}]interface{}) +func TransformRemoveCreationTimestamp(obj map[string]any) error { + metadata := obj["metadata"].(map[any]any) delete(metadata, "creationTimestamp") return nil } @@ -141,7 +141,7 @@ func TransformRemoveCreationTimestamp(obj map[string]interface{}) error { // WriteYAML writes the given objects out, serialized as YAML, using the // context's OutputRule. Objects are written as separate documents, separated // from each other by `---` (as per the YAML spec). -func (g GenerationContext) WriteYAML(itemPath, headerText string, objs []interface{}, options ...*WriteYAMLOptions) error { +func (g GenerationContext) WriteYAML(itemPath, headerText string, objs []any, options ...*WriteYAMLOptions) error { out, err := g.Open(nil, itemPath) if err != nil { return err @@ -171,7 +171,7 @@ func (g GenerationContext) WriteYAML(itemPath, headerText string, objs []interfa } // yamlMarshal is based on sigs.k8s.io/yaml.Marshal, but allows for transforming the final data before writing. -func yamlMarshal(o interface{}, options ...*WriteYAMLOptions) ([]byte, error) { +func yamlMarshal(o any, options ...*WriteYAMLOptions) ([]byte, error) { j, err := json.Marshal(o) if err != nil { return nil, fmt.Errorf("error marshaling into JSON: %w", err) @@ -183,10 +183,10 @@ func yamlMarshal(o interface{}, options ...*WriteYAMLOptions) ([]byte, error) { // yamlJSONToYAMLWithFilter is based on sigs.k8s.io/yaml.JSONToYAML, but allows for transforming the final data before writing. func yamlJSONToYAMLWithFilter(j []byte, options ...*WriteYAMLOptions) ([]byte, error) { // Convert the JSON to an object. - var jsonObj map[string]interface{} + var jsonObj map[string]any // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 + // etc.) when unmarshalling to any, it just picks float64 // universally. 
go-yaml does go through the effort of picking the right // number type, so we can preserve number type throughout this process. if err := rawyaml.Unmarshal(j, &jsonObj); err != nil { diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go index 192235b76..8aa56a4bc 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go @@ -18,6 +18,7 @@ package genall import ( "fmt" + "maps" "strings" "golang.org/x/tools/go/packages" @@ -42,7 +43,7 @@ func RegisterOptionsMarkers(into *markers.Registry) error { return err } // NB(directxman12): we make this optional so we don't have a bootstrap problem with helpgen - if helpGiver, hasHelp := ((interface{})(InputPaths(nil))).(HasHelp); hasHelp { + if helpGiver, hasHelp := ((any)(InputPaths(nil))).(HasHelp); hasHelp { into.AddHelp(InputPathsMarker, helpGiver.Help()) } return nil @@ -100,9 +101,7 @@ func FromOptionsWithConfig(cfg *packages.Config, optionsRegistry *markers.Regist } outRules := DirectoryPerGenerator("config", protoRt.GeneratorsByName) - for gen, rule := range protoRt.OutputRules.ByGenerator { - outRules.ByGenerator[gen] = rule - } + maps.Copy(outRules.ByGenerator, protoRt.OutputRules.ByGenerator) genRuntime.OutputRules = outRules return genRuntime, nil diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go index 23f52e3d2..4e4a61cc1 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go @@ -36,11 +36,11 @@ type Collector struct { } // MarkerValues are all the values for some set of markers. -type MarkerValues map[string][]interface{} +type MarkerValues map[string][]any // Get fetches the first value for the given marker, returning // nil if no values are available. -func (v MarkerValues) Get(name string) interface{} { +func (v MarkerValues) Get(name string) any { vals := v[name] if len(vals) == 0 { return nil @@ -109,7 +109,7 @@ func (c *Collector) parseMarkersInPackage(nodeMarkersRaw map[ast.Node][]markerCo default: target = DescribesType } - markerVals := make(map[string][]interface{}) + markerVals := make(map[string][]any) for _, markerRaw := range markersRaw { markerText := markerRaw.Text() def := c.Registry.Lookup(markerText, target) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go index f75f21295..e0994983d 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go @@ -50,8 +50,8 @@ func peekNoSpace(scanner *sc.Scanner) rune { var ( // interfaceType is a pre-computed reflect.Type representing the empty interface. - interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() - rawArgsType = reflect.TypeOf((*RawArguments)(nil)).Elem() + interfaceType = reflect.TypeFor[*any]().Elem() + rawArgsType = reflect.TypeFor[*RawArguments]().Elem() ) // lowerCamelCase converts PascalCase string to @@ -69,7 +69,7 @@ func lowerCamelCase(in string) string { // RawArguments is a special type that can be used for a marker // to receive *all* raw, underparsed argument data for a marker. -// You probably want to use `interface{}` to match any type instead. +// You probably want to use `any` to match any type instead.
// Use *only* for legacy markers that don't follow Definition's normal // parsing logic. It should *not* be used as a field in a marker struct. type RawArguments []byte @@ -80,7 +80,7 @@ type RawArguments []byte type ArgumentType int const ( - // Invalid represents a type that can't be parsed, and should never be used. + // InvalidType represents a type that can't be parsed, and should never be used. InvalidType ArgumentType = iota // IntType is an int IntType @@ -183,13 +183,13 @@ func makeSliceType(itemType Argument) (reflect.Type, error) { var itemReflectedType reflect.Type switch itemType.Type { case IntType: - itemReflectedType = reflect.TypeOf(int(0)) + itemReflectedType = reflect.TypeFor[int]() case NumberType: - itemReflectedType = reflect.TypeOf(float64(0)) + itemReflectedType = reflect.TypeFor[float64]() case StringType: - itemReflectedType = reflect.TypeOf("") + itemReflectedType = reflect.TypeFor[string]() case BoolType: - itemReflectedType = reflect.TypeOf(false) + itemReflectedType = reflect.TypeFor[bool]() case SliceType: subItemType, err := makeSliceType(*itemType.ItemType) if err != nil { @@ -220,13 +220,13 @@ func makeMapType(itemType Argument) (reflect.Type, error) { var itemReflectedType reflect.Type switch itemType.Type { case IntType: - itemReflectedType = reflect.TypeOf(int(0)) + itemReflectedType = reflect.TypeFor[int]() case NumberType: - itemReflectedType = reflect.TypeOf(float64(0)) + itemReflectedType = reflect.TypeFor[float64]() case StringType: - itemReflectedType = reflect.TypeOf("") + itemReflectedType = reflect.TypeFor[string]() case BoolType: - itemReflectedType = reflect.TypeOf(false) + itemReflectedType = reflect.TypeFor[bool]() case SliceType: subItemType, err := makeSliceType(*itemType.ItemType) if err != nil { @@ -251,7 +251,7 @@ func makeMapType(itemType Argument) (reflect.Type, error) { itemReflectedType = reflect.PointerTo(itemReflectedType) } - return reflect.MapOf(reflect.TypeOf(""), itemReflectedType), nil + return reflect.MapOf(reflect.TypeFor[string](), itemReflectedType), nil } // guessType takes an educated guess about the type of the next field. If allowSlice @@ -825,7 +825,7 @@ type markerParser interface { // Parse uses the type information in this Definition to parse the given // raw marker in the form `+a:b:c=arg,d=arg` into an output object of the // type specified in the definition. -func (d *Definition) Parse(rawMarker string) (interface{}, error) { +func (d *Definition) Parse(rawMarker string) (any, error) { name, anonName, fields := splitMarker(rawMarker) outPointer := reflect.New(d.Output) @@ -929,7 +929,7 @@ func (d *Definition) Parse(rawMarker string) (interface{}, error) { // type, its public fields will automatically be populated into Fields (and similar // fields in Definition). Other values will have a single, empty-string-named Fields // entry. -func MakeDefinition(name string, target TargetType, output interface{}) (*Definition, error) { +func MakeDefinition(name string, target TargetType, output any) (*Definition, error) { def := &Definition{ Name: name, Target: target, @@ -945,9 +945,9 @@ func MakeDefinition(name string, target TargetType, output interface{}) (*Defini } // MakeAnyTypeDefinition constructs a definition for an output struct with a -// field named `Value` of type `interface{}`. The argument to the marker will +// field named `Value` of type `any`. The argument to the marker will // be parsed as AnyType and assigned to the field named `Value`. 
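The `reflect.TypeFor` swaps above are mechanical: the generic helper (Go 1.22+) names a type directly instead of reflecting over a zero value. A quick equivalence check, purely illustrative:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Each pair yields the identical reflect.Type.
	fmt.Println(reflect.TypeOf("") == reflect.TypeFor[string]()) // true
	fmt.Println(reflect.TypeOf((*any)(nil)).Elem() ==
		reflect.TypeFor[*any]().Elem()) // true
}
```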
-func MakeAnyTypeDefinition(name string, target TargetType, output interface{}) (*Definition, error) { +func MakeAnyTypeDefinition(name string, target TargetType, output any) (*Definition, error) { defn, err := MakeDefinition(name, target, output) if err != nil { return nil, err diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go index 7dcd45899..0897e9437 100644 --- a/vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go @@ -54,7 +54,7 @@ func (r *Registry) init() { // It's a shortcut around // // r.Register(MakeDefinition(name, target, obj)) -func (r *Registry) Define(name string, target TargetType, obj interface{}) error { +func (r *Registry) Define(name string, target TargetType, obj any) error { def, err := MakeDefinition(name, target, obj) if err != nil { return err diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/backendtlspolicy_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/backendtlspolicy_types.go new file mode 100644 index 000000000..5a755eb1b --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/backendtlspolicy_types.go @@ -0,0 +1,318 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:resource:categories=gateway-api,shortName=btlspolicy +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` +// +// BackendTLSPolicy is a Direct Attached Policy. +// +kubebuilder:metadata:labels="gateway.networking.k8s.io/policy=Direct" + +// BackendTLSPolicy provides a way to configure how a Gateway +// connects to a Backend via TLS. +type BackendTLSPolicy struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec defines the desired state of BackendTLSPolicy. + // +required + Spec BackendTLSPolicySpec `json:"spec"` + + // Status defines the current state of BackendTLSPolicy. + // +optional + Status PolicyStatus `json:"status,omitempty"` +} + +// BackendTLSPolicyList contains a list of BackendTLSPolicies +// +kubebuilder:object:root=true +type BackendTLSPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BackendTLSPolicy `json:"items"` +} + +// BackendTLSPolicySpec defines the desired state of BackendTLSPolicy. +// +// Support: Extended +type BackendTLSPolicySpec struct { + // TargetRefs identifies an API object to apply the policy to. + // Only Services have Extended support. Implementations MAY support + // additional objects, with Implementation Specific support. + // Note that this config applies to the entire referenced resource + // by default, but this default may change in the future to provide + // a more granular application of the policy. 
+ // + // TargetRefs must be _distinct_. This means either that: + // + // * They select different targets. If this is the case, then targetRef + // entries are distinct. In terms of fields, this means that the + // multi-part key defined by `group`, `kind`, and `name` must + // be unique across all targetRef entries in the BackendTLSPolicy. + // * They select different sectionNames in the same target. + // + // + // When more than one BackendTLSPolicy selects the same target and + // sectionName, implementations MUST determine precedence using the + // following criteria, continuing on ties: + // + // * The older policy by creation timestamp takes precedence. For + // example, a policy with a creation timestamp of "2021-07-15 + // 01:02:03" MUST be given precedence over a policy with a + // creation timestamp of "2021-07-15 01:02:04". + // * The policy appearing first in alphabetical order by {name}. + // For example, a policy named `bar` is given precedence over a + // policy named `baz`. + // + // For any BackendTLSPolicy that does not take precedence, the + // implementation MUST ensure the `Accepted` Condition is set to + // `status: False`, with Reason `Conflicted`. + // + // Support: Extended for Kubernetes Service + // + // Support: Implementation-specific for any other resource + // + // +required + // +listType=atomic + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="sectionName must be specified when targetRefs includes 2 or more references to the same target",rule="self.all(p1, self.all(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name ? ((!has(p1.sectionName) || p1.sectionName == '') == (!has(p2.sectionName) || p2.sectionName == '')) : true))" + // +kubebuilder:validation:XValidation:message="sectionName must be unique when targetRefs includes 2 or more references to the same target",rule="self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.sectionName) || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName == '')) || (has(p1.sectionName) && has(p2.sectionName) && p1.sectionName == p2.sectionName))))" + TargetRefs []LocalPolicyTargetReferenceWithSectionName `json:"targetRefs"` + + // Validation contains backend TLS validation configuration. + // +required + Validation BackendTLSPolicyValidation `json:"validation"` + + // Options are a list of key/value pairs to enable extended TLS + // configuration for each implementation. For example, configuring the + // minimum TLS version or supported cipher suites. + // + // A set of common keys MAY be defined by the API in the future. To avoid + // any ambiguity, implementation-specific definitions MUST use + // domain-prefixed names, such as `example.com/my-custom-option`. + // Un-prefixed names are reserved for key names defined by Gateway API. + // + // Support: Implementation-specific + // + // +optional + // +kubebuilder:validation:MaxProperties=16 + Options map[AnnotationKey]AnnotationValue `json:"options,omitempty"` +} + +// BackendTLSPolicyValidation contains backend TLS validation configuration. 
+// +kubebuilder:validation:XValidation:message="must not contain both CACertificateRefs and WellKnownCACertificates",rule="!(has(self.caCertificateRefs) && size(self.caCertificateRefs) > 0 && has(self.wellKnownCACertificates) && self.wellKnownCACertificates != \"\")" +// +kubebuilder:validation:XValidation:message="must specify either CACertificateRefs or WellKnownCACertificates",rule="(has(self.caCertificateRefs) && size(self.caCertificateRefs) > 0 || has(self.wellKnownCACertificates) && self.wellKnownCACertificates != \"\")" +type BackendTLSPolicyValidation struct { + // CACertificateRefs contains one or more references to Kubernetes objects that + // contain a PEM-encoded TLS CA certificate bundle, which is used to + // validate a TLS handshake between the Gateway and backend Pod. + // + // If CACertificateRefs is empty or unspecified, then WellKnownCACertificates must be + // specified. Only one of CACertificateRefs or WellKnownCACertificates may be specified, + // not both. If CACertificateRefs is empty or unspecified, the configuration for + // WellKnownCACertificates MUST be honored instead if supported by the implementation. + // + // A CACertificateRef is invalid if: + // + // * It refers to a resource that cannot be resolved (e.g., the referenced resource + // does not exist) or is misconfigured (e.g., a ConfigMap does not contain a key + // named `ca.crt`). In this case, the Reason must be set to `InvalidCACertificateRef` + // and the Message of the Condition must indicate which reference is invalid and why. + // + // * It refers to an unknown or unsupported kind of resource. In this case, the Reason + // must be set to `InvalidKind` and the Message of the Condition must explain which + // kind of resource is unknown or unsupported. + // + // * It refers to a resource in another namespace. This may change in future + // spec updates. + // + // Implementations MAY choose to perform further validation of the certificate + // content (e.g., checking expiry or enforcing specific formats). In such cases, + // an implementation-specific Reason and Message must be set for the invalid reference. + // + // In all cases, the implementation MUST ensure the `ResolvedRefs` Condition on + // the BackendTLSPolicy is set to `status: False`, with a Reason and Message + // that indicate the cause of the error. Connections using an invalid + // CACertificateRef MUST fail, and the client MUST receive an HTTP 5xx error + // response. If ALL CACertificateRefs are invalid, the implementation MUST also + // ensure the `Accepted` Condition on the BackendTLSPolicy is set to + // `status: False`, with a Reason `NoValidCACertificate`. + // + // + // A single CACertificateRef to a Kubernetes ConfigMap kind has "Core" support. + // Implementations MAY choose to support attaching multiple certificates to + // a backend, but this behavior is implementation-specific. + // + // Support: Core - An optional single reference to a Kubernetes ConfigMap, + // with the CA certificate in a key named `ca.crt`. + // + // Support: Implementation-specific - More than one reference, other kinds + // of resources, or a single reference that includes multiple certificates. + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=8 + CACertificateRefs []LocalObjectReference `json:"caCertificateRefs,omitempty"` + + // WellKnownCACertificates specifies whether system CA certificates may be used in + // the TLS handshake between the gateway and backend pod. 
+ // + // If WellKnownCACertificates is unspecified or empty (""), then CACertificateRefs + // must be specified with at least one entry for a valid configuration. Only one of + // CACertificateRefs or WellKnownCACertificates may be specified, not both. + // If an implementation does not support the WellKnownCACertificates field, or + // the supplied value is not recognized, the implementation MUST ensure the + // `Accepted` Condition on the BackendTLSPolicy is set to `status: False`, with + // a Reason `Invalid`. + // + // Support: Implementation-specific + // + // +optional + // +listType=atomic + WellKnownCACertificates *WellKnownCACertificatesType `json:"wellKnownCACertificates,omitempty"` + + // Hostname is used for multiple purposes in the connection between Gateways and + // backends: + // + // 1. Hostname MUST be used as the SNI to connect to the backend (RFC 6066). + // 2. Hostname MUST be used for authentication and MUST match the certificate + // served by the matching backend, unless SubjectAltNames is specified. + // 3. If SubjectAltNames are specified, Hostname can be used for certificate selection + // but MUST NOT be used for authentication. If you want to use the value + // of the Hostname field for authentication, you MUST add it to the SubjectAltNames list. + // + // Support: Core + // + // +required + Hostname PreciseHostname `json:"hostname"` + + // SubjectAltNames contains one or more Subject Alternative Names. + // When specified, the certificate served from the backend MUST + // have at least one Subject Alternative Name matching one of the specified SubjectAltNames. + // + // Support: Extended + // + // +optional + // +listType=atomic + // +kubebuilder:validation:MaxItems=5 + SubjectAltNames []SubjectAltName `json:"subjectAltNames,omitempty"` +} + +// SubjectAltName represents Subject Alternative Name. +// +kubebuilder:validation:XValidation:message="SubjectAltName element must contain Hostname, if Type is set to Hostname",rule="!(self.type == \"Hostname\" && (!has(self.hostname) || self.hostname == \"\"))" +// +kubebuilder:validation:XValidation:message="SubjectAltName element must not contain Hostname, if Type is not set to Hostname",rule="!(self.type != \"Hostname\" && has(self.hostname) && self.hostname != \"\")" +// +kubebuilder:validation:XValidation:message="SubjectAltName element must contain URI, if Type is set to URI",rule="!(self.type == \"URI\" && (!has(self.uri) || self.uri == \"\"))" +// +kubebuilder:validation:XValidation:message="SubjectAltName element must not contain URI, if Type is not set to URI",rule="!(self.type != \"URI\" && has(self.uri) && self.uri != \"\")" +type SubjectAltName struct { + // Type determines the format of the Subject Alternative Name. Always required. + // + // Support: Core + // + // +required + Type SubjectAltNameType `json:"type"` + + // Hostname contains Subject Alternative Name specified in DNS name format. + // Required when Type is set to Hostname, ignored otherwise. + // + // Support: Core + // + // +optional + Hostname Hostname `json:"hostname,omitempty"` + + // URI contains Subject Alternative Name specified in a full URI format. + // It MUST include both a scheme (e.g., "http" or "ftp") and a scheme-specific-part. + // Common values include SPIFFE IDs like "spiffe://mycluster.example.com/ns/myns/sa/svc1sa". + // Required when Type is set to URI, ignored otherwise.
+ // + // Support: Core + // + // +optional + URI AbsoluteURI `json:"uri,omitempty"` +} + +// WellKnownCACertificatesType is the type of CA certificate that will be used +// when the caCertificateRefs field is unspecified. +// +kubebuilder:validation:Enum=System +type WellKnownCACertificatesType string + +const ( + // WellKnownCACertificatesSystem indicates that well known system CA certificates should be used. + WellKnownCACertificatesSystem WellKnownCACertificatesType = "System" +) + +// SubjectAltNameType is the type of the Subject Alternative Name. +// +kubebuilder:validation:Enum=Hostname;URI +type SubjectAltNameType string + +const ( + // HostnameSubjectAltNameType specifies hostname-based SAN. + // + // Support: Core + HostnameSubjectAltNameType SubjectAltNameType = "Hostname" + + // URISubjectAltNameType specifies URI-based SAN, e.g. SPIFFE id. + // + // Support: Core + URISubjectAltNameType SubjectAltNameType = "URI" +) + +const ( + // This reason is used with the "Accepted" condition when it is + // set to false because all CACertificateRefs of the + // BackendTLSPolicy are invalid. + BackendTLSPolicyReasonNoValidCACertificate PolicyConditionReason = "NoValidCACertificate" +) + +const ( + // This condition indicates whether the controller was able to resolve all + // object references for the BackendTLSPolicy. + // + // Possible reasons for this condition to be True are: + // + // * "ResolvedRefs" + // + // Possible reasons for this condition to be False are: + // + // * "InvalidCACertificateRef" + // * "InvalidKind" + // + // Controllers may raise this condition with other reasons, but should + // prefer to use the reasons listed above to improve interoperability. + BackendTLSPolicyConditionResolvedRefs PolicyConditionType = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when the condition + // is true. + BackendTLSPolicyReasonResolvedRefs PolicyConditionReason = "ResolvedRefs" + + // This reason is used with the "ResolvedRefs" condition when one of the + // BackendTLSPolicy's CACertificateRefs is invalid. + // A CACertificateRef is considered invalid when it refers to a nonexistent + // resource or when the data within that resource is malformed. + BackendTLSPolicyReasonInvalidCACertificateRef PolicyConditionReason = "InvalidCACertificateRef" + + // This reason is used with the "ResolvedRefs" condition when one of the + // BackendTLSPolicy's CACertificateRefs references an unknown or unsupported + // Group and/or Kind. + BackendTLSPolicyReasonInvalidKind PolicyConditionReason = "InvalidKind" +) diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go index caa5e96bf..58d975186 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gateway_types.go @@ -33,15 +33,18 @@ import ( // Gateway represents an instance of a service-traffic handling infrastructure // by binding Listeners to a set of IP addresses. type Gateway struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // Spec defines the desired state of Gateway. + // +required Spec GatewaySpec `json:"spec"` // Status defines the current state of Gateway. 
 	//
 	// +kubebuilder:default={conditions: {{type: "Accepted", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"},{type: "Programmed", status: "Unknown", reason:"Pending", message:"Waiting for controller", lastTransitionTime: "1970-01-01T00:00:00Z"}}}
+	// +optional
 	Status GatewayStatus `json:"status,omitempty"`
 }
 
@@ -63,12 +66,15 @@ type GatewaySpec struct {
 	// GatewayClassName used for this Gateway. This is the name of a
 	// GatewayClass resource.
+	// +required
 	GatewayClassName ObjectName `json:"gatewayClassName"`
 
 	// Listeners associated with this Gateway. Listeners define
 	// logical endpoints that are bound on this Gateway's addresses.
 	// At least one Listener MUST be specified.
 	//
+	// ## Distinct Listeners
+	//
 	// Each Listener in a set of Listeners (for example, in a single Gateway)
 	// MUST be _distinct_, in that a traffic flow MUST be able to be assigned to
 	// exactly one listener. (This section uses "set of Listeners" rather than
@@ -80,55 +86,76 @@ type GatewaySpec struct {
 	// combination of Port, Protocol, and, if supported by the protocol, Hostname.
 	//
 	// Some combinations of port, protocol, and TLS settings are considered
-	// Core support and MUST be supported by implementations based on their
-	// targeted conformance profile:
+	// Core support and MUST be supported by implementations based on the objects
+	// they support:
 	//
-	// HTTP Profile
+	// HTTPRoute
 	//
 	// 1. HTTPRoute, Port: 80, Protocol: HTTP
 	// 2. HTTPRoute, Port: 443, Protocol: HTTPS, TLS Mode: Terminate, TLS keypair provided
 	//
-	// TLS Profile
+	// TLSRoute
 	//
 	// 1. TLSRoute, Port: 443, Protocol: TLS, TLS Mode: Passthrough
 	//
 	// "Distinct" Listeners have the following property:
 	//
-	// The implementation can match inbound requests to a single distinct
-	// Listener. When multiple Listeners share values for fields (for
+	// **The implementation can match inbound requests to a single distinct
+	// Listener**.
+	//
+	// When multiple Listeners share values for fields (for
 	// example, two Listeners with the same Port value), the implementation
 	// can match requests to only one of the Listeners using other
 	// Listener fields.
 	//
-	// For example, the following Listener scenarios are distinct:
+	// When multiple Listeners have the same value for the Protocol field, then
+	// each of the Listeners with matching Protocol values MUST have different
+	// values for other fields.
+	//
+	// The set of fields that MUST be different for a Listener differs per protocol.
+	// The following rules define which fields MUST be considered for
+	// Listeners to be distinct for each protocol currently defined in the
+	// Gateway API spec.
+	//
+	// The set of Listeners that all share a protocol value MUST have _different_
+	// values for _at least one_ of these fields to be distinct (see the sketch
+	// below):
 	//
-	// 1. Multiple Listeners with the same Port that all use the "HTTP"
-	//    Protocol that all have unique Hostname values.
-	// 2. Multiple Listeners with the same Port that use either the "HTTPS" or
-	//    "TLS" Protocol that all have unique Hostname values.
-	// 3. A mixture of "TCP" and "UDP" Protocol Listeners, where no Listener
-	//    with the same Protocol has the same Port value.
+	// * **HTTP, HTTPS, TLS**: Port, Hostname
+	// * **TCP, UDP**: Port
 	//
-	// Some fields in the Listener struct have possible values that affect
-	// whether the Listener is distinct. Hostname is particularly relevant
-	// for HTTP or HTTPS protocols.
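+	// For illustration, a hypothetical set of distinct Listeners under these
+	// rules (names and hostnames invented; tls config omitted for brevity):
+	//
+	//   listeners:
+	//   - name: https-foo
+	//     protocol: HTTPS
+	//     port: 443
+	//     hostname: "foo.example.com"
+	//   - name: https-wildcard
+	//     protocol: HTTPS
+	//     port: 443
+	//     hostname: "*.example.com"
+	//   - name: tcp
+	//     protocol: TCP
+	//     port: 8443
+	//
+	// The two HTTPS Listeners differ on Hostname, and the TCP Listener differs
+	// from both on Port, so all three are distinct.
+	//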
+ // One **very** important rule to call out involves what happens when an + // implementation: // - // When using the Hostname value to select between same-Port, same-Protocol - // Listeners, the Hostname value must be different on each Listener for the - // Listener to be distinct. + // * Supports TCP protocol Listeners, as well as HTTP, HTTPS, or TLS protocol + // Listeners, and + // * sees HTTP, HTTPS, or TLS protocols with the same `port` as one with TCP + // Protocol. // - // When the Listeners are distinct based on Hostname, inbound request + // In this case all the Listeners that share a port with the + // TCP Listener are not distinct and so MUST NOT be accepted. + // + // If an implementation does not support TCP Protocol Listeners, then the + // previous rule does not apply, and the TCP Listeners SHOULD NOT be + // accepted. + // + // Note that the `tls` field is not used for determining if a listener is distinct, because + // Listeners that _only_ differ on TLS config will still conflict in all cases. + // + // ### Listeners that are distinct only by Hostname + // + // When the Listeners are distinct based only on Hostname, inbound request // hostnames MUST match from the most specific to least specific Hostname // values to choose the correct Listener and its associated set of Routes. // - // Exact matches must be processed before wildcard matches, and wildcard - // matches must be processed before fallback (empty Hostname value) + // Exact matches MUST be processed before wildcard matches, and wildcard + // matches MUST be processed before fallback (empty Hostname value) // matches. For example, `"foo.example.com"` takes precedence over // `"*.example.com"`, and `"*.example.com"` takes precedence over `""`. // // Additionally, if there are multiple wildcard entries, more specific // wildcard entries must be processed before less specific wildcard entries. // For example, `"*.foo.example.com"` takes precedence over `"*.example.com"`. + // // The precise definition here is that the higher the number of dots in the // hostname to the right of the wildcard character, the higher the precedence. // @@ -136,18 +163,26 @@ type GatewaySpec struct { // the left, however, so `"*.example.com"` will match both // `"foo.bar.example.com"` _and_ `"bar.example.com"`. // + // ## Handling indistinct Listeners + // // If a set of Listeners contains Listeners that are not distinct, then those - // Listeners are Conflicted, and the implementation MUST set the "Conflicted" + // Listeners are _Conflicted_, and the implementation MUST set the "Conflicted" // condition in the Listener Status to "True". // + // The words "indistinct" and "conflicted" are considered equivalent for the + // purpose of this documentation. + // // Implementations MAY choose to accept a Gateway with some Conflicted // Listeners only if they only accept the partial Listener set that contains - // no Conflicted Listeners. To put this another way, implementations may - // accept a partial Listener set only if they throw out *all* the conflicting - // Listeners. No picking one of the conflicting listeners as the winner. - // This also means that the Gateway must have at least one non-conflicting - // Listener in this case, otherwise it violates the requirement that at - // least one Listener must be present. + // no Conflicted Listeners. 
+ // + // Specifically, an implementation MAY accept a partial Listener set subject to + // the following rules: + // + // * The implementation MUST NOT pick one conflicting Listener as the winner. + // ALL indistinct Listeners must not be accepted for processing. + // * At least one distinct Listener MUST be present, or else the Gateway effectively + // contains _no_ Listeners, and must be rejected from processing as a whole. // // The implementation MUST set a "ListenersNotValid" condition on the // Gateway Status when the Gateway contains Conflicted Listeners whether or @@ -156,7 +191,25 @@ type GatewaySpec struct { // Accepted. Additionally, the Listener status for those listeners SHOULD // indicate which Listeners are conflicted and not Accepted. // - // A Gateway's Listeners are considered "compatible" if: + // ## General Listener behavior + // + // Note that, for all distinct Listeners, requests SHOULD match at most one Listener. + // For example, if Listeners are defined for "foo.example.com" and "*.example.com", a + // request to "foo.example.com" SHOULD only be routed using routes attached + // to the "foo.example.com" Listener (and not the "*.example.com" Listener). + // + // This concept is known as "Listener Isolation", and it is an Extended feature + // of Gateway API. Implementations that do not support Listener Isolation MUST + // clearly document this, and MUST NOT claim support for the + // `GatewayHTTPListenerIsolation` feature. + // + // Implementations that _do_ support Listener Isolation SHOULD claim support + // for the Extended `GatewayHTTPListenerIsolation` feature and pass the associated + // conformance tests. + // + // ## Compatible Listeners + // + // A Gateway's Listeners are considered _compatible_ if: // // 1. They are distinct. // 2. The implementation can serve them in compliance with the Addresses @@ -171,16 +224,11 @@ type GatewaySpec struct { // on the same address, or cannot mix HTTPS and generic TLS listens on the same port // would not consider those cases compatible, even though they are distinct. // - // Note that requests SHOULD match at most one Listener. For example, if - // Listeners are defined for "foo.example.com" and "*.example.com", a - // request to "foo.example.com" SHOULD only be routed using routes attached - // to the "foo.example.com" Listener (and not the "*.example.com" Listener). - // This concept is known as "Listener Isolation". Implementations that do - // not support Listener Isolation MUST clearly document this. - // // Implementations MAY merge separate Gateways onto a single set of // Addresses if all Listeners across all Gateways are compatible. // + // In a future release the MinItems=1 requirement MAY be dropped. + // // Support: Core // // +listType=map @@ -192,12 +240,13 @@ type GatewaySpec struct { // +kubebuilder:validation:XValidation:message="hostname must not be specified for protocols ['TCP', 'UDP']",rule="self.all(l, l.protocol in ['TCP', 'UDP'] ? (!has(l.hostname) || l.hostname == '') : true)" // +kubebuilder:validation:XValidation:message="Listener name must be unique within the Gateway",rule="self.all(l1, self.exists_one(l2, l1.name == l2.name))" // +kubebuilder:validation:XValidation:message="Combination of port, protocol and hostname must be unique for each listener",rule="self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? 
l1.hostname == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))"
+	// +required
 	Listeners []Listener `json:"listeners"`
 
 	// Addresses requested for this Gateway. This is optional and behavior can
 	// depend on the implementation. If a value is set in the spec and the
 	// requested address is invalid or unavailable, the implementation MUST
-	// indicate this in the associated entry in GatewayStatus.Addresses.
+	// indicate this in an associated entry in GatewayStatus.Conditions.
 	//
 	// The Addresses field represents a request for the address(es) on the
 	// "outside of the Gateway", that traffic bound for this Gateway will use.
@@ -216,19 +265,92 @@ type GatewaySpec struct {
 	// Support: Extended
 	//
 	// +optional
+	// +listType=atomic
 	//
 	// +kubebuilder:validation:MaxItems=16
-	// +kubebuilder:validation:XValidation:message="IPAddress values must be unique",rule="self.all(a1, a1.type == 'IPAddress' ? self.exists_one(a2, a2.type == a1.type && a2.value == a1.value) : true )"
-	// +kubebuilder:validation:XValidation:message="Hostname values must be unique",rule="self.all(a1, a1.type == 'Hostname' ? self.exists_one(a2, a2.type == a1.type && a2.value == a1.value) : true )"
-	Addresses []GatewayAddress `json:"addresses,omitempty"`
+	// +kubebuilder:validation:XValidation:message="IPAddress values must be unique",rule="self.all(a1, a1.type == 'IPAddress' && has(a1.value) ? self.exists_one(a2, a2.type == a1.type && has(a2.value) && a2.value == a1.value) : true )"
+	// +kubebuilder:validation:XValidation:message="Hostname values must be unique",rule="self.all(a1, a1.type == 'Hostname' && has(a1.value) ? self.exists_one(a2, a2.type == a1.type && has(a2.value) && a2.value == a1.value) : true )"
+	Addresses []GatewaySpecAddress `json:"addresses,omitempty"`
 
 	// Infrastructure defines infrastructure level attributes about this Gateway instance.
 	//
-	// Support: Core
+	// Support: Extended
 	//
-	//
 	// +optional
 	Infrastructure *GatewayInfrastructure `json:"infrastructure,omitempty"`
+
+	// AllowedListeners defines which ListenerSets can be attached to this Gateway.
+	// While this feature is experimental, the default value is to allow no ListenerSets.
+	//
+	//
+	// +optional
+	AllowedListeners *AllowedListeners `json:"allowedListeners,omitempty"`
+
+	// TLS specifies the frontend and backend TLS configuration for the entire
+	// gateway.
+	//
+	// Support: Extended
+	//
+	// +optional
+	//
+	TLS *GatewayTLSConfig `json:"tls,omitempty"`
+
+	// DefaultScope, when set, configures the Gateway as a default Gateway,
+	// meaning it will dynamically and implicitly have Routes (e.g. HTTPRoute)
+	// attached to it, according to the scope configured here.
+	//
+	// If unset (the default) or set to None, the Gateway will not act as a
+	// default Gateway; if set, the Gateway will claim any Route with a
+	// matching scope set in its UseDefaultGateway field, subject to the usual
+	// rules about which Routes may attach to the Gateway.
+	//
+	// Think carefully before using this functionality! While the normal rules
+	// about which Route can apply are still enforced, it is simply easier for
+	// the wrong Route to be accidentally attached to this Gateway in this
+	// configuration. If the Gateway operator is not also the operator in
+	// control of the scope (e.g. namespace) with tight controls and checks on
+	// what kind of workloads and Routes get added in that scope, we strongly
+	// recommend not using this just because it seems convenient, and instead
+	// sticking to direct Route attachment.
+	//
+	// +optional
+	//
+	DefaultScope GatewayDefaultScope `json:"defaultScope,omitempty"`
+}
+
+// AllowedListeners defines which ListenerSets can be attached to this Gateway.
+type AllowedListeners struct {
+	// Namespaces defines from which namespaces ListenerSets may be attached to
+	// this Gateway.
+	// While this feature is experimental, the default value is to allow no ListenerSets.
+	//
+	// +optional
+	// +kubebuilder:default={from: None}
+	Namespaces *ListenerNamespaces `json:"namespaces,omitempty"`
+}
+
+// ListenerNamespaces indicates which namespaces ListenerSets should be selected from.
+type ListenerNamespaces struct {
+	// From indicates where ListenerSets can attach to this Gateway. Possible
+	// values are:
+	//
+	// * Same: Only ListenerSets in the same namespace may be attached to this Gateway.
+	// * Selector: ListenerSets in namespaces selected by the selector may be attached to this Gateway.
+	// * All: ListenerSets in all namespaces may be attached to this Gateway.
+	// * None: Only listeners defined in the Gateway's spec are allowed.
+	//
+	// While this feature is experimental, the default value is None.
+	//
+	// +optional
+	// +kubebuilder:default=None
+	// +kubebuilder:validation:Enum=All;Selector;Same;None
+	From *FromNamespaces `json:"from,omitempty"`
+
+	// Selector must be specified when From is set to "Selector". In that case,
+	// only ListenerSets in Namespaces matching this Selector will be selected by this
+	// Gateway. This field is ignored for other values of "From".
+	//
+	// +optional
+	Selector *metav1.LabelSelector `json:"selector,omitempty"`
+}
 
 // Listener embodies the concept of a logical endpoint where a Gateway accepts
@@ -238,6 +360,7 @@ type Listener struct {
 	// Gateway.
 	//
 	// Support: Core
+	// +required
 	Name SectionName `json:"name"`
 
 	// Hostname specifies the virtual hostname to match for protocol types that
@@ -250,10 +373,31 @@ type Listener struct {
 	//
 	// * TLS: The Listener Hostname MUST match the SNI.
 	// * HTTP: The Listener Hostname MUST match the Host header of the request.
-	// * HTTPS: The Listener Hostname SHOULD match at both the TLS and HTTP
-	//   protocol layers as described above. If an implementation does not
-	//   ensure that both the SNI and Host header match the Listener hostname,
-	//   it MUST clearly document that.
+	// * HTTPS: The Listener Hostname SHOULD match both the SNI and Host header.
+	//   Note that this does not require the SNI and Host header to be the same.
+	//   The semantics of this are described in more detail below.
+	//
+	// To ensure security, Section 11.1 of RFC 6066 emphasizes that server
+	// implementations that rely on SNI hostname matching MUST also verify
+	// hostnames within the application protocol.
+	//
+	// Section 9.1.2 of RFC 7540 provides a mechanism for servers to reject the
+	// reuse of a connection by responding with the HTTP 421 Misdirected Request
+	// status code. This indicates that the origin server has rejected the
+	// request because it appears to have been misdirected.
+	//
+	// To detect misdirected requests, Gateways SHOULD match the authority of
+	// the requests with all the SNI hostname(s) configured across all the
+	// Gateway Listeners on the same port and protocol:
+	//
+	// * If another Listener has an exact match or more specific wildcard entry,
+	//   the Gateway SHOULD return a 421.
+	// * If the current Listener (selected by SNI matching during ClientHello)
+	//   does not match the Host:
+	//   * If another Listener does match the Host, the Gateway SHOULD return a
+	//     421.
+	//   * If no other Listener matches the Host, the Gateway MUST return a
+	//     404.
 	//
 	// For HTTPRoute and TLSRoute resources, there is an interaction with the
 	// `spec.hostnames` array. When both listener and route specify hostnames,
@@ -274,18 +418,24 @@ type Listener struct {
 	// same port, subject to the Listener compatibility rules.
 	//
 	// Support: Core
+	//
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=65535
+	//
+	// +required
 	Port PortNumber `json:"port"`
 
 	// Protocol specifies the network protocol this listener expects to receive.
 	//
 	// Support: Core
+	// +required
 	Protocol ProtocolType `json:"protocol"`
 
 	// TLS is the TLS configuration for the Listener. This field is required if
 	// the Protocol field is "HTTPS" or "TLS". It is invalid to set this field
 	// if the Protocol field is "HTTP", "TCP", or "UDP".
 	//
-	// The association of SNIs to Certificate defined in GatewayTLSConfig is
+	// The association of SNIs to Certificate defined in ListenerTLSConfig is
 	// defined based on the Hostname field for this listener.
 	//
 	// The GatewayClass MUST use the longest matching SNI out of all
@@ -294,7 +444,7 @@ type Listener struct {
 	// Support: Core
 	//
 	// +optional
-	TLS *GatewayTLSConfig `json:"tls,omitempty"`
+	TLS *ListenerTLSConfig `json:"tls,omitempty"`
 
 	// AllowedRoutes defines the types of routes that MAY be attached to a
 	// Listener and the trusted namespaces where those Route resources MAY be
@@ -351,7 +501,7 @@ type Listener struct {
 
 // +kubebuilder:validation:MinLength=1
 // +kubebuilder:validation:MaxLength=255
-// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([-a-zSA-Z0-9]*[a-zA-Z0-9])?$|[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9]+$`
+// +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])?$|[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9]+$`
 type ProtocolType string
 
 const (
@@ -374,10 +524,31 @@ const (
 	UDPProtocolType ProtocolType = "UDP"
 )
 
-// GatewayTLSConfig describes a TLS configuration.
+// GatewayBackendTLS describes backend TLS configuration for a gateway.
+type GatewayBackendTLS struct {
+	// ClientCertificateRef is a reference to an object that contains a Client
+	// Certificate and the associated private key.
+	//
+	// References to a resource in a different namespace are invalid UNLESS there
+	// is a ReferenceGrant in the target namespace that allows the certificate
+	// to be attached. If a ReferenceGrant does not allow this reference, the
+	// "ResolvedRefs" condition MUST be set to False for this listener with the
+	// "RefNotPermitted" reason.
+	//
+	// ClientCertificateRef can reference standard Kubernetes resources, e.g.
+	// Secret, or implementation-specific custom resources.
+	//
+	// Support: Core
+	//
+	// +optional
+	//
+	ClientCertificateRef *SecretObjectReference `json:"clientCertificateRef,omitempty"`
+}
+
+// ListenerTLSConfig describes a TLS configuration for a listener.
+//
 // +kubebuilder:validation:XValidation:message="certificateRefs or options must be specified when mode is Terminate",rule="self.mode == 'Terminate' ? size(self.certificateRefs) > 0 || size(self.options) > 0 : true"
-type GatewayTLSConfig struct {
+type ListenerTLSConfig struct {
 	// Mode defines the TLS behavior for the TLS session initiated by the client.
 	// There are two possible modes:
 	//
@@ -422,21 +593,10 @@ type GatewayTLSConfig struct {
 	// Support: Implementation-specific (More than one reference or other resource types)
 	//
 	// +optional
+	// +listType=atomic
 	// +kubebuilder:validation:MaxItems=64
 	CertificateRefs []SecretObjectReference `json:"certificateRefs,omitempty"`
 
-	// FrontendValidation holds configuration information for validating the frontend (client).
-	// Setting this field will require clients to send a client certificate
-	// required for validation during the TLS handshake. In browsers this may result in a dialog appearing
-	// that requests a user to specify the client certificate.
-	// The maximum depth of a certificate chain accepted in verification is Implementation specific.
-	//
-	// Support: Extended
-	//
-	// +optional
-	//
-	FrontendValidation *FrontendTLSValidation `json:"frontendValidation,omitempty"`
-
 	// Options are a list of key/value pairs to enable extended TLS
 	// configuration for each implementation. For example, configuring the
 	// minimum TLS version or supported cipher suites.
@@ -453,6 +613,58 @@ type GatewayTLSConfig struct {
 	Options map[AnnotationKey]AnnotationValue `json:"options,omitempty"`
 }
 
+// GatewayTLSConfig specifies frontend and backend TLS configuration for the
+// gateway.
+type GatewayTLSConfig struct {
+	// Backend describes TLS configuration for the gateway when connecting
+	// to backends.
+	//
+	// Note that this contains only details for the Gateway as a TLS client,
+	// and does _not_ imply behavior about how to choose which backend should
+	// get a TLS connection. That is determined by the presence of a BackendTLSPolicy.
+	//
+	// Support: Core
+	//
+	// +optional
+	//
+	Backend *GatewayBackendTLS `json:"backend,omitempty"`
+
+	// Frontend describes TLS configuration when a client connects to the Gateway.
+	//
+	// Support: Core
+	//
+	// +optional
+	//
+	Frontend *FrontendTLSConfig `json:"frontend,omitempty"`
+}
+
+// FrontendTLSConfig specifies frontend TLS configuration for the gateway.
+type FrontendTLSConfig struct {
+	// Default specifies the default client certificate validation configuration
+	// for all Listeners handling HTTPS traffic, unless a per-port configuration
+	// is defined.
+	//
+	// Support: Core
+	//
+	// +required
+	//
+	Default TLSConfig `json:"default"`
+
+	// PerPort specifies TLS configuration assigned per port.
+	// Per-port configuration is optional. Once set, this configuration overrides
+	// the default configuration for all Listeners handling HTTPS traffic
+	// that match this port.
+	// Each override port requires a unique TLS configuration.
+	//
+	// Support: Core
+	//
+	// +optional
+	// +listType=map
+	// +listMapKey=port
+	// +kubebuilder:validation:MaxItems=64
+	// +kubebuilder:validation:XValidation:message="Port for TLS configuration must be unique within the Gateway",rule="self.all(t1, self.exists_one(t2, t1.port == t2.port))"
+	//
+	PerPort []TLSPortConfig `json:"perPort,omitempty"`
+}
+
 // TLSModeType type defines how a Gateway handles TLS sessions.
 //
 // +kubebuilder:validation:Enum=Terminate;Passthrough
@@ -471,6 +683,46 @@ const (
 	TLSModePassthrough TLSModeType = "Passthrough"
 )
 
+// TLSConfig describes TLS configuration that can apply to multiple Listeners
+// within this Gateway. Currently, it stores only the client certificate validation
+// configuration, but this may be extended in the future.
+type TLSConfig struct {
+	// Validation holds configuration information for validating the frontend (client).
+	// Setting this field will result in mutual authentication when connecting to the gateway.
+	// In browsers this may result in a dialog appearing
+	// that requests a user to specify the client certificate.
+	// The maximum depth of a certificate chain accepted in verification is implementation-specific.
+	//
+	// Support: Core
+	//
+	// +optional
+	//
+	Validation *FrontendTLSValidation `json:"validation,omitempty"`
+}
+
+type TLSPortConfig struct {
+	// Port indicates the port number to which the TLS configuration will be
+	// applied. This configuration will be applied to all Listeners handling HTTPS
+	// traffic that match this port.
+	//
+	// Support: Core
+	//
+	// +required
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=65535
+	//
+	Port PortNumber `json:"port"`
+
+	// TLS stores the configuration that will be applied to all Listeners handling
+	// HTTPS traffic and matching the given port.
+	//
+	// Support: Core
+	//
+	// +required
+	//
+	TLS TLSConfig `json:"tls"`
+}
+
 // FrontendTLSValidation holds configuration information that can be used to validate
 // the frontend initiating the TLS connection
@@ -487,8 +739,8 @@ type FrontendTLSValidation struct {
 	// Support: Core - A single reference to a Kubernetes ConfigMap
 	// with the CA certificate in a key named `ca.crt`.
 	//
-	// Support: Implementation-specific (More than one reference, or other kinds
-	// of resources).
+	// Support: Implementation-specific (More than one certificate in a ConfigMap
+	// with different keys or more than one reference, or other kinds of resources).
 	//
 	// References to a resource in a different namespace are invalid UNLESS there
 	// is a ReferenceGrant in the target namespace that allows the certificate
@@ -496,11 +748,53 @@ type FrontendTLSValidation struct {
 	// "ResolvedRefs" condition MUST be set to False for this listener with the
 	// "RefNotPermitted" reason.
 	//
+	// +required
+	// +listType=atomic
 	// +kubebuilder:validation:MaxItems=8
 	// +kubebuilder:validation:MinItems=1
-	CACertificateRefs []ObjectReference `json:"caCertificateRefs,omitempty"`
+	CACertificateRefs []ObjectReference `json:"caCertificateRefs"`
+
+	// Mode defines how client certificates are validated.
+	// There are two possible modes:
+	//
+	// - AllowValidOnly: In this mode, the gateway will accept connections only if
+	//   the client presents a valid certificate. This certificate must successfully
+	//   pass validation against the CA certificates specified in `CACertificateRefs`.
+	// - AllowInsecureFallback: In this mode, the gateway will accept connections
+	//   even if the client certificate is not presented or fails verification.
+	//
+	//   This approach delegates client authorization to the backend and introduces
+	//   a significant security risk. It should be used only in testing environments,
+	//   or on a temporary basis in non-testing environments.
+	//
+	// Defaults to AllowValidOnly.
+	//
+	// Support: Core
+	//
+	// +optional
+	// +kubebuilder:default=AllowValidOnly
+	Mode FrontendValidationModeType `json:"mode,omitempty"`
}
 
+// FrontendValidationModeType type defines how a Gateway validates client certificates.
+//
+// +kubebuilder:validation:Enum=AllowValidOnly;AllowInsecureFallback
+type FrontendValidationModeType string
+
+const (
+	// AllowValidOnly indicates that a client certificate is required
+	// during the TLS handshake and MUST pass validation.
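+	//
+	// For illustration, a hypothetical Gateway `spec.tls` fragment selecting
+	// this mode via the frontend validation types defined above (names
+	// invented; the ConfigMap is assumed to hold a CA bundle in `ca.crt`):
+	//
+	//   tls:
+	//     frontend:
+	//       default:
+	//         validation:
+	//           caCertificateRefs:
+	//           - group: ""
+	//             kind: ConfigMap
+	//             name: client-ca
+	//           mode: AllowValidOnly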
+	//
+	// Support: Core
+	AllowValidOnly FrontendValidationModeType = "AllowValidOnly"
+
+	// AllowInsecureFallback indicates that a client certificate may not be
+	// presented during the handshake, or that validation against the CA
+	// certificates may fail.
+	//
+	// Support: Extended
+	AllowInsecureFallback FrontendValidationModeType = "AllowInsecureFallback"
+)
+
 // AllowedRoutes defines which Routes may be attached to this Listener.
 type AllowedRoutes struct {
 	// Namespaces indicates namespaces from which Routes may be attached to this
@@ -509,6 +803,7 @@ type AllowedRoutes struct {
 	// Support: Core
 	//
 	// +optional
 	// +kubebuilder:default={from: Same}
 	Namespaces *RouteNamespaces `json:"namespaces,omitempty"`
 
@@ -525,25 +820,26 @@ type AllowedRoutes struct {
 	// Support: Core
 	//
 	// +optional
+	// +listType=atomic
 	// +kubebuilder:validation:MaxItems=8
 	Kinds []RouteGroupKind `json:"kinds,omitempty"`
 }
 
-// FromNamespaces specifies namespace from which Routes may be attached to a
+// FromNamespaces specifies the namespaces from which Routes/ListenerSets may be attached to a
 // Gateway.
-//
-// +kubebuilder:validation:Enum=All;Selector;Same
 type FromNamespaces string
 
 const (
-	// Routes in all namespaces may be attached to this Gateway.
+	// Routes/ListenerSets in all namespaces may be attached to this Gateway.
 	NamespacesFromAll FromNamespaces = "All"
 
-	// Only Routes in namespaces selected by the selector may be attached to
+	// Only Routes/ListenerSets in namespaces selected by the selector may be attached to
 	// this Gateway.
 	NamespacesFromSelector FromNamespaces = "Selector"
 
-	// Only Routes in the same namespace as the Gateway may be attached to this
+	// Only Routes/ListenerSets in the same namespace as the Gateway may be attached to this
 	// Gateway.
 	NamespacesFromSame FromNamespaces = "Same"
+	// No Routes/ListenerSets may be attached to this Gateway.
+	NamespacesFromNone FromNamespaces = "None"
 )
 
 // RouteNamespaces indicate which namespaces Routes should be selected from.
@@ -560,6 +856,7 @@ type RouteNamespaces struct {
 	//
 	// +optional
 	// +kubebuilder:default=Same
+	// +kubebuilder:validation:Enum=All;Selector;Same
 	From *FromNamespaces `json:"from,omitempty"`
 
 	// Selector must be specified when From is set to "Selector". In that case,
@@ -581,27 +878,31 @@ type RouteGroupKind struct {
 	Group *Group `json:"group,omitempty"`
 
 	// Kind is the kind of the Route.
+	// +required
 	Kind Kind `json:"kind"`
 }
 
-// GatewayAddress describes an address that can be bound to a Gateway.
+// GatewaySpecAddress describes an address that can be bound to a Gateway.
 //
-// +kubebuilder:validation:XValidation:message="Hostname value must only contain valid characters (matching ^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$)",rule="self.type == 'Hostname' ? self.value.matches(r\"\"\"^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\"\"\"): true"
-type GatewayAddress struct {
+// +kubebuilder:validation:XValidation:message="Hostname value must be empty or contain only valid characters (matching ^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$)",rule="self.type == 'Hostname' ? (!has(self.value) || self.value.matches(r\"\"\"^(\\*\\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\"\"\")): true"
+type GatewaySpecAddress struct {
 	// Type of the address.
 	//
 	// +optional
 	// +kubebuilder:default=IPAddress
 	Type *AddressType `json:"type,omitempty"`
 
-	// Value of the address. The validity of the values will depend
-	// on the type and support by the controller.
+	// When a value is unspecified, an implementation SHOULD automatically
+	// assign an address matching the requested type if possible.
+	//
+	// If an implementation does not support an empty value, it MUST set the
+	// "Programmed" condition in status to False with a reason of "AddressNotAssigned".
 	//
 	// Examples: `1.2.3.4`, `128::1`, `my-ip-address`.
 	//
-	// +kubebuilder:validation:MinLength=1
+	// +optional
 	// +kubebuilder:validation:MaxLength=253
-	Value string `json:"value"`
+	Value string `json:"value,omitempty"`
 }
 
 // GatewayStatusAddress describes a network address that is bound to a Gateway.
@@ -621,6 +922,7 @@ type GatewayStatusAddress struct {
 	//
 	// +kubebuilder:validation:MinLength=1
 	// +kubebuilder:validation:MaxLength=253
+	// +required
 	Value string `json:"value"`
 }
 
@@ -637,6 +939,7 @@ type GatewayStatus struct {
 	// * a specified address was unusable (e.g. already in use)
 	//
 	// +optional
+	// +listType=atomic
 	//
 	// +kubebuilder:validation:MaxItems=16
 	Addresses []GatewayStatusAddress `json:"addresses,omitempty"`
@@ -654,6 +957,34 @@ type GatewayStatus struct {
 	// * "Programmed"
 	// * "Ready"
 	//
+	//
+	// Notes for implementors:
+	//
+	// Conditions are a listType `map`, which means that they function like a
+	// map with a key of the `type` field _in the k8s apiserver_.
+	//
+	// This means that implementations must obey some rules when updating this
+	// section.
+	//
+	// * Implementations MUST perform a read-modify-write cycle on this field
+	//   before modifying it. That is, when modifying this field, implementations
+	//   must be confident they have fetched the most recent version of this field,
+	//   and ensure that changes they make are on that recent version.
+	// * Implementations MUST NOT remove or reorder Conditions that they are not
+	//   directly responsible for. For example, if an implementation sees a Condition
+	//   with type `special.io/SomeField`, it MUST NOT remove, change or update that
+	//   Condition.
+	// * Implementations MUST always _merge_ changes into Conditions of the same Type,
+	//   rather than creating more than one Condition of the same Type.
+	// * Implementations MUST always update the `observedGeneration` field of the
+	//   Condition to the `metadata.generation` of the Gateway at the time of update creation.
+	// * If the `observedGeneration` of a Condition is _greater than_ the value the
+	//   implementation knows about, then it MUST NOT perform the update on that Condition,
+	//   but must wait for a future reconciliation and status update. (The assumption is that
+	//   the implementation's copy of the object is stale and an update will be re-triggered
+	//   if relevant.)
+	//
+	//
 	// +optional
 	// +listType=map
 	// +listMapKey=type
@@ -679,11 +1010,16 @@ type GatewayInfrastructure struct {
 	//
 	// An implementation may choose to add additional implementation-specific labels as they see fit.
 	//
+	// If an implementation maps these labels to Pods, or any other resource that would need to be recreated when labels
+	// change, it SHOULD clearly warn about this behavior in documentation.
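+	//
+	// For illustration, a hypothetical label satisfying the key validation
+	// rules added below (an optional DNS-subdomain prefix plus a name segment):
+	//
+	//   infrastructure:
+	//     labels:
+	//       example.com/team: payments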
+	//
+	// Support: Extended
 	//
 	// +optional
 	// +kubebuilder:validation:MaxProperties=8
-	Labels map[AnnotationKey]AnnotationValue `json:"labels,omitempty"`
+	// +kubebuilder:validation:XValidation:message="Label keys must be in the form of an optional DNS subdomain prefix followed by a required name segment of up to 63 characters.",rule="self.all(key, key.matches(r\"\"\"^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$\"\"\"))"
+	// +kubebuilder:validation:XValidation:message="If specified, the label key's prefix must be a DNS subdomain not longer than 253 characters in total.",rule="self.all(key, key.split(\"/\")[0].size() < 253)"
+	Labels map[LabelKey]LabelValue `json:"labels,omitempty"`
 
 	// Annotations that SHOULD be applied to any resources created in response to this Gateway.
 	//
@@ -696,6 +1032,8 @@ type GatewayInfrastructure struct {
 	//
 	// +optional
 	// +kubebuilder:validation:MaxProperties=8
+	// +kubebuilder:validation:XValidation:message="Annotation keys must be in the form of an optional DNS subdomain prefix followed by a required name segment of up to 63 characters.",rule="self.all(key, key.matches(r\"\"\"^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$\"\"\"))"
+	// +kubebuilder:validation:XValidation:message="If specified, the annotation key's prefix must be a DNS subdomain not longer than 253 characters in total.",rule="self.all(key, key.split(\"/\")[0].size() < 253)"
 	Annotations map[AnnotationKey]AnnotationValue `json:"annotations,omitempty"`
 
 	// ParametersRef is a reference to a resource that contains the configuration
@@ -708,6 +1046,11 @@ type GatewayInfrastructure struct {
 	// the merging behavior is implementation specific.
 	// It is generally recommended that GatewayClass provides defaults that can be overridden by a Gateway.
 	//
+	// If the referent cannot be found, refers to an unsupported kind, or the
+	// data within that resource is malformed, the Gateway SHOULD be
+	// rejected with the "Accepted" status condition set to "False" and an
+	// "InvalidParameters" reason.
+	//
 	// Support: Implementation-specific
 	//
 	// +optional
@@ -718,15 +1061,18 @@ type GatewayInfrastructure struct {
 // configuration resource within the namespace.
 type LocalParametersReference struct {
 	// Group is the group of the referent.
+	// +required
 	Group Group `json:"group"`
 
 	// Kind is the kind of the referent.
+	// +required
 	Kind Kind `json:"kind"`
 
 	// Name is the name of the referent.
 	//
 	// +kubebuilder:validation:MinLength=1
 	// +kubebuilder:validation:MaxLength=253
+	// +required
 	Name string `json:"name"`
 }
 
@@ -814,6 +1160,13 @@ const (
 	// information on which address is causing the problem and how to resolve it
 	// in the condition message.
 	GatewayReasonAddressNotUsable GatewayConditionReason = "AddressNotUsable"
+	// This condition indicates that the `FrontendValidationModeType` changed from
+	// `AllowValidOnly` to `AllowInsecureFallback`.
+	GatewayConditionInsecureFrontendValidationMode GatewayConditionReason = "InsecureFrontendValidationMode"
+	// This reason MUST be set for GatewayConditionInsecureFrontendValidationMode
+	// when a client changes the FrontendValidationModeType for a Gateway, or a
+	// per-port override, to `AllowInsecureFallback`.
+ GatewayReasonConfigurationChanged GatewayConditionReason = "ConfigurationChanged" ) const ( @@ -907,9 +1260,41 @@ const ( GatewayReasonListenersNotReady GatewayConditionReason = "ListenersNotReady" ) +const ( + // AttachedListenerSets is a condition that is true when the Gateway has + // at least one ListenerSet attached to it. + // + // Possible reasons for this condition to be True are: + // + // * "ListenerSetsAttached" + // + // Possible reasons for this condition to be False are: + // + // * "NoListenerSetsAttached" + // * "ListenerSetsNotAllowed" + // + // Controllers may raise this condition with other reasons, + // but should prefer to use the reasons listed above to improve + // interoperability. + GatewayConditionAttachedListenerSets GatewayConditionType = "AttachedListenerSets" + + // This reason is used with the "AttachedListenerSets" condition when the + // Gateway has at least one ListenerSet attached to it. + GatewayReasonListenerSetsAttached GatewayConditionReason = "ListenerSetsAttached" + + // This reason is used with the "AttachedListenerSets" condition when the + // Gateway has no ListenerSets attached to it. + GatewayReasonNoListenerSetsAttached GatewayConditionReason = "NoListenerSetsAttached" + + // This reason is used with the "AttachedListenerSets" condition when the + // Gateway has ListenerSets attached to it, but the ListenerSets are not allowed. + GatewayReasonListenerSetsNotAllowed GatewayConditionReason = "ListenerSetsNotAllowed" +) + // ListenerStatus is the status associated with a Listener. type ListenerStatus struct { // Name is the name of the Listener that this status corresponds to. + // +required Name SectionName `json:"name"` // SupportedKinds is the list indicating the Kinds supported by this @@ -922,6 +1307,8 @@ type ListenerStatus struct { // and invalid Route kinds are specified, the implementation MUST // reference the valid Route kinds that have been specified. // + // +required + // +listType=atomic // +kubebuilder:validation:MaxItems=8 SupportedKinds []RouteGroupKind `json:"supportedKinds"` @@ -942,13 +1329,45 @@ type ListenerStatus struct { // // Uses for this field include troubleshooting Route attachment and // measuring blast radius/impact of changes to a Listener. + // +required AttachedRoutes int32 `json:"attachedRoutes"` // Conditions describe the current condition of this listener. // + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. 
+	// * If the `observedGeneration` of a Condition is _greater than_ the value the
+	//   implementation knows about, then it MUST NOT perform the update on that Condition,
+	//   but must wait for a future reconciliation and status update. (The assumption is that
+	//   the implementation's copy of the object is stale and an update will be re-triggered
+	//   if relevant.)
+	//
+	//
+	//
 	// +listType=map
 	// +listMapKey=type
 	// +kubebuilder:validation:MaxItems=8
+	// +required
 	Conditions []metav1.Condition `json:"conditions"`
 }
 
@@ -1145,6 +1564,62 @@ const (
 	ListenerReasonPending ListenerConditionReason = "Pending"
 )
 
+const (
+	// This condition indicates that TLS configuration within this Listener
+	// conflicts with TLS configuration in another Listener on the same port.
+	// This could happen for two reasons:
+	//
+	// 1) Overlapping Hostnames: Listener A matches *.example.com while Listener
+	//    B matches foo.example.com.
+	// 2) Overlapping Certificates: Listener A contains a certificate with a
+	//    SAN for *.example.com, while Listener B contains a certificate with a
+	//    SAN for foo.example.com.
+	//
+	// This overlapping TLS configuration can be particularly problematic when
+	// combined with HTTP connection coalescing. When clients reuse connections
+	// using this technique, it can have confusing interactions with Gateway
+	// API, such as TLS configuration for one Listener getting used for a
+	// request reusing an existing connection that would not be used if the same
+	// request was initiating a new connection.
+	//
+	// Controllers MUST detect the presence of overlapping hostnames and MAY
+	// detect the presence of overlapping certificates.
+	//
+	// This condition MUST be set on all Listeners with overlapping TLS config.
+	// For example, consider the following Listener-to-hostname mapping:
+	//
+	// A: foo.example.com
+	// B: foo.example.org
+	// C: *.example.com
+	//
+	// In the above example, Listeners A and C would have overlapping hostnames
+	// and therefore this condition SHOULD be set for Listeners A and C, but not
+	// B.
+	//
+	// Possible reasons for this condition to be True are:
+	//
+	// * "OverlappingHostnames"
+	// * "OverlappingCertificates"
+	//
+	// If a controller supports checking for both possible reasons and finds
+	// that both are true, it SHOULD set the "OverlappingCertificates" Reason.
+	//
+	// This is a negative polarity condition and MUST NOT be set when it is
+	// False.
+	//
+	// Controllers may raise this condition with other reasons, but should
+	// prefer to use the reasons listed above to improve interoperability.
+	ListenerConditionOverlappingTLSConfig ListenerConditionType = "OverlappingTLSConfig"
+
+	// This reason is used with the "OverlappingTLSConfig" condition when the
+	// condition is true due to overlapping hostnames.
+	ListenerReasonOverlappingHostnames ListenerConditionReason = "OverlappingHostnames"
+
+	// This reason is used with the "OverlappingTLSConfig" condition when the
+	// condition is true due to overlapping certificates.
+	ListenerReasonOverlappingCertificates ListenerConditionReason = "OverlappingCertificates"
+)
+
 const (
 	// "Ready" is a condition type reserved for future use. It should not be used by implementations.
// Note: This condition is not really "deprecated", but rather "reserved"; however, deprecated triggers Go linters diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go index 21875dce1..972d35045 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types.go @@ -49,10 +49,12 @@ import ( // // GatewayClass is a Cluster level resource. type GatewayClass struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // Spec defines the desired state of GatewayClass. + // +required Spec GatewayClassSpec `json:"spec"` // Status defines the current state of GatewayClass. @@ -60,7 +62,8 @@ type GatewayClass struct { // Implementations MUST populate status on all GatewayClass resources which // specify their controller name. // - // +kubebuilder:default={conditions: {{type: "Accepted", status: "Unknown", message: "Waiting for controller", reason: "Waiting", lastTransitionTime: "1970-01-01T00:00:00Z"}}} + // +kubebuilder:default={conditions: {{type: "Accepted", status: "Unknown", message: "Waiting for controller", reason: "Pending", lastTransitionTime: "1970-01-01T00:00:00Z"}}} + // +optional Status GatewayClassStatus `json:"status,omitempty"` } @@ -83,6 +86,7 @@ type GatewayClassSpec struct { // Support: Core // // +kubebuilder:validation:XValidation:message="Value is immutable",rule="self == oldSelf" + // +required ControllerName GatewayController `json:"controllerName"` // ParametersRef is a reference to a resource that contains the configuration @@ -93,8 +97,10 @@ type GatewayClassSpec struct { // or an implementation-specific custom resource. The resource can be // cluster-scoped or namespace-scoped. // - // If the referent cannot be found, the GatewayClass's "InvalidParameters" - // status condition will be true. + // If the referent cannot be found, refers to an unsupported kind, or when + // the data within that resource is malformed, the GatewayClass SHOULD be + // rejected with the "Accepted" status condition set to "False" and an + // "InvalidParameters" reason. // // A Gateway for this GatewayClass may provide its own `parametersRef`. When both are specified, // the merging behavior is implementation specific. @@ -116,15 +122,18 @@ type GatewayClassSpec struct { // configuration resource within the cluster. type ParametersReference struct { // Group is the group of the referent. + // +required Group Group `json:"group"` // Kind is kind of the referent. + // +required Kind Kind `json:"kind"` // Name is the name of the referent. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=253 + // +required Name string `json:"name"` // Namespace is the namespace of the referent. @@ -162,6 +171,7 @@ const ( // Possible reasons for this condition to be False are: // // * "InvalidParameters" + // * "Unsupported" // * "UnsupportedVersion" // // Possible reasons for this condition to be Unknown are: @@ -176,9 +186,13 @@ const ( // true. GatewayClassReasonAccepted GatewayClassConditionReason = "Accepted" - // This reason is used with the "Accepted" condition when the - // GatewayClass was not accepted because the parametersRef field - // was invalid, with more detail in the message. 
+	// This reason is used with the "Accepted" condition when the GatewayClass
+	// was not accepted because the parametersRef field refers to
+	// * a namespaced resource but the Namespace field is not set, or
+	// * a cluster-scoped resource but the Namespace field is set, or
+	// * a nonexistent object, or
+	// * an unsupported resource or kind, or
+	// * an existing resource but the data within that resource is malformed.
 	GatewayClassReasonInvalidParameters GatewayClassConditionReason = "InvalidParameters"
 
 	// This reason is used with the "Accepted" condition when the
@@ -187,6 +201,11 @@ const (
 	// GatewayClass.
 	GatewayClassReasonPending GatewayClassConditionReason = "Pending"
 
+	// This reason is used with the "Accepted" condition when the GatewayClass
+	// was not accepted because the implementation does not support a
+	// user-defined GatewayClass.
+	GatewayClassReasonUnsupported GatewayClassConditionReason = "Unsupported"
+
 	// Deprecated: Use "Pending" instead.
 	GatewayClassReasonWaiting GatewayClassConditionReason = "Waiting"
 )
@@ -244,6 +263,35 @@ type GatewayClassStatus struct {
 	// Controllers should prefer to publish conditions using values
 	// of GatewayClassConditionType for the type of each Condition.
 	//
+	//
+	// Notes for implementors:
+	//
+	// Conditions are a listType `map`, which means that they function like a
+	// map with a key of the `type` field _in the k8s apiserver_.
+	//
+	// This means that implementations must obey some rules when updating this
+	// section.
+	//
+	// * Implementations MUST perform a read-modify-write cycle on this field
+	//   before modifying it. That is, when modifying this field, implementations
+	//   must be confident they have fetched the most recent version of this field,
+	//   and ensure that changes they make are on that recent version.
+	// * Implementations MUST NOT remove or reorder Conditions that they are not
+	//   directly responsible for. For example, if an implementation sees a Condition
+	//   with type `special.io/SomeField`, it MUST NOT remove, change or update that
+	//   Condition.
+	// * Implementations MUST always _merge_ changes into Conditions of the same Type,
+	//   rather than creating more than one Condition of the same Type.
+	// * Implementations MUST always update the `observedGeneration` field of the
+	//   Condition to the `metadata.generation` of the GatewayClass at the time of update creation.
+	// * If the `observedGeneration` of a Condition is _greater than_ the value the
+	//   implementation knows about, then it MUST NOT perform the update on that Condition,
+	//   but must wait for a future reconciliation and status update. (The assumption is that
+	//   the implementation's copy of the object is stale and an update will be re-triggered
+	//   if relevant.)
+	//
+	//
 	// +optional
 	// +listType=map
 	// +listMapKey=type
@@ -252,10 +300,10 @@ type GatewayClassStatus struct {
 	Conditions []metav1.Condition `json:"conditions,omitempty"`
 
 	// SupportedFeatures is the set of features the GatewayClass supports.
-	// It MUST be sorted in ascending alphabetical order.
+	// It MUST be sorted in ascending alphabetical order by the Name key.
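+	//
+	// For illustration, a hypothetical status fragment in the new object
+	// form (sorted by Name):
+	//
+	//   supportedFeatures:
+	//   - name: GatewayHTTPListenerIsolation
+	//   - name: HTTPRouteRequestMirror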
 	// +optional
-	// +listType=set
-	//
+	// +listType=map
+	// +listMapKey=name
 	// +kubebuilder:validation:MaxItems=64
 	SupportedFeatures []SupportedFeature `json:"supportedFeatures,omitempty"`
 }
@@ -269,6 +317,11 @@ type GatewayClassList struct {
 	Items []GatewayClass `json:"items"`
 }
 
-// SupportedFeature is used to describe distinct features that are covered by
+// FeatureName is used to describe distinct features that are covered by
 // conformance tests.
-type SupportedFeature string
+type FeatureName string
+
+type SupportedFeature struct {
+	// +required
+	Name FeatureName `json:"name"`
+}
diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types_overrides.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types_overrides.go
new file mode 100644
index 000000000..8d768fdea
--- /dev/null
+++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/gatewayclass_types_overrides.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"encoding/json"
+	"errors"
+)
+
+// The code below handles the experimental field breaking change introduced in
+// https://github.com/kubernetes-sigs/gateway-api/pull/3200/.
+// We are overriding the UnmarshalJSON function to be able to handle cases where
+// users had the old version of the GatewayClass CRD applied with SupportedFeatures
+// as a list of strings and not a list of objects.
+// See https://github.com/kubernetes-sigs/gateway-api/issues/3464
+// for more information.
+
+func (s *SupportedFeature) UnmarshalJSON(data []byte) error {
+	var oldSupportedFeature oldSupportedFeature
+	var unmarshalTypeErr *json.UnmarshalTypeError
+	if err := json.Unmarshal(data, &oldSupportedFeature); err == nil {
+		s.Name = FeatureName(oldSupportedFeature)
+		return nil
+	} else if !errors.As(err, &unmarshalTypeErr) {
+		// If the error is not a type error, return it
+		return err
+	}
+
+	var si supportedFeatureInternal
+	if err := json.Unmarshal(data, &si); err != nil {
+		return err
+	}
+	s.Name = si.Name
+	return nil
+}
+
+// This is solely for the purpose of ensuring backward compatibility and
+// SHOULD NOT be used elsewhere.
+type supportedFeatureInternal struct {
+	// +required
+	Name FeatureName `json:"name"`
+}
+
+// This is solely for the purpose of ensuring backward compatibility and
+// SHOULD NOT be used elsewhere.
+type oldSupportedFeature string
diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go
index 91a8a3d26..5f9bde7a8 100644
--- a/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go
+++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/grpcroute_types.go
@@ -56,13 +56,16 @@ import (
 // Implementations MAY also accept HTTP/2 connections with an upgrade from
 // HTTP/1, i.e. without prior knowledge.
 type GRPCRoute struct {
-	metav1.TypeMeta `json:",inline"`
+	metav1.TypeMeta `json:",inline"`
+	// +optional
 	metav1.ObjectMeta `json:"metadata,omitempty"`
 
 	// Spec defines the desired state of GRPCRoute.
+ // +required Spec GRPCRouteSpec `json:"spec,omitempty"` // Status defines the current state of GRPCRoute. + // +optional Status GRPCRouteStatus `json:"status,omitempty"` } @@ -136,13 +139,17 @@ type GRPCRouteSpec struct { // Support: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 Hostnames []Hostname `json:"hostnames,omitempty"` // Rules are a list of GRPC matchers, filters and actions. // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 + // +kubebuilder:validation:XValidation:message="While 16 rules and 64 matches per rule are allowed, the total number of matches across all rules in a route must be less than 128",rule="(self.size() > 0 ? (has(self[0].matches) ? self[0].matches.size() : 0) : 0) + (self.size() > 1 ? (has(self[1].matches) ? self[1].matches.size() : 0) : 0) + (self.size() > 2 ? (has(self[2].matches) ? self[2].matches.size() : 0) : 0) + (self.size() > 3 ? (has(self[3].matches) ? self[3].matches.size() : 0) : 0) + (self.size() > 4 ? (has(self[4].matches) ? self[4].matches.size() : 0) : 0) + (self.size() > 5 ? (has(self[5].matches) ? self[5].matches.size() : 0) : 0) + (self.size() > 6 ? (has(self[6].matches) ? self[6].matches.size() : 0) : 0) + (self.size() > 7 ? (has(self[7].matches) ? self[7].matches.size() : 0) : 0) + (self.size() > 8 ? (has(self[8].matches) ? self[8].matches.size() : 0) : 0) + (self.size() > 9 ? (has(self[9].matches) ? self[9].matches.size() : 0) : 0) + (self.size() > 10 ? (has(self[10].matches) ? self[10].matches.size() : 0) : 0) + (self.size() > 11 ? (has(self[11].matches) ? self[11].matches.size() : 0) : 0) + (self.size() > 12 ? (has(self[12].matches) ? self[12].matches.size() : 0) : 0) + (self.size() > 13 ? (has(self[13].matches) ? self[13].matches.size() : 0) : 0) + (self.size() > 14 ? (has(self[14].matches) ? self[14].matches.size() : 0) : 0) + (self.size() > 15 ? (has(self[15].matches) ? self[15].matches.size() : 0) : 0) <= 128" + // Rules []GRPCRouteRule `json:"rules,omitempty"` } @@ -150,6 +157,12 @@ type GRPCRouteSpec struct { // conditions (matches), processing it (filters), and forwarding the request to // an API object (backendRefs). type GRPCRouteRule struct { + // Name is the name of the route rule. This name MUST be unique within a Route if it is set. + // + // Support: Extended + // +optional + Name *SectionName `json:"name,omitempty"` + // Matches define conditions used for matching the rule against incoming // gRPC requests. Each match is independent, i.e. this rule will be matched // if **any** one of the matches is satisfied. @@ -201,7 +214,8 @@ type GRPCRouteRule struct { // the above criteria. // // +optional - // +kubebuilder:validation:MaxItems=8 + // +listType=atomic + // +kubebuilder:validation:MaxItems=64 Matches []GRPCRouteMatch `json:"matches,omitempty"` // Filters define the filters that are applied to requests that match @@ -221,7 +235,7 @@ type GRPCRouteRule struct { // Specifying the same filter multiple times is not supported unless explicitly // indicated in the filter. // - // If an implementation can not support a combination of filters, it must clearly + // If an implementation cannot support a combination of filters, it must clearly // document that limitation. 
In cases where incompatible or unsupported // filters are specified and cause the `Accepted` condition to be set to status // `False`, implementations may use the `IncompatibleFilters` reason to specify @@ -230,6 +244,7 @@ type GRPCRouteRule struct { // Support: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1" @@ -265,6 +280,7 @@ type GRPCRouteRule struct { // Support for weight: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 BackendRefs []GRPCBackendRef `json:"backendRefs,omitempty"` @@ -387,7 +403,7 @@ type GRPCHeaderMatch struct { // // +optional // +kubebuilder:default=Exact - Type *HeaderMatchType `json:"type,omitempty"` + Type *GRPCHeaderMatchType `json:"type,omitempty"` // Name is the name of the gRPC Header to be matched. // @@ -396,12 +412,14 @@ type GRPCHeaderMatch struct { // entries with an equivalent header name MUST be ignored. Due to the // case-insensitivity of header names, "foo" and "Foo" are considered // equivalent. + // +required Name GRPCHeaderName `json:"name"` // Value is the value of the gRPC Header to be matched. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=4096 + // +required Value string `json:"value"` } @@ -512,6 +530,7 @@ type GRPCRouteFilter struct { // +unionDiscriminator // +kubebuilder:validation:Enum=ResponseHeaderModifier;RequestHeaderModifier;RequestMirror;ExtensionRef // + // +required Type GRPCRouteFilterType `json:"type"` // RequestHeaderModifier defines a schema for a filter that modifies request @@ -541,6 +560,8 @@ type GRPCRouteFilter struct { // Support: Extended // // +optional + // + // +kubebuilder:validation:XValidation:message="Only one of percent or fraction may be specified in HTTPRequestMirrorFilter",rule="!(has(self.percent) && has(self.fraction))" RequestMirror *HTTPRequestMirrorFilter `json:"requestMirror,omitempty"` // ExtensionRef is an optional, implementation-specific extension to the @@ -620,6 +641,7 @@ type GRPCBackendRef struct { // Filters field in GRPCRouteRule.) // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1" diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go index 736e80982..3d89af0da 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/httproute_types.go @@ -33,13 +33,16 @@ import ( // used to specify additional processing steps. Backends specify where matching // requests should be routed. type HTTPRoute struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // Spec defines the desired state of HTTPRoute. + // +required Spec HTTPRouteSpec `json:"spec"` // Status defines the current state of HTTPRoute. 
+ // +optional Status HTTPRouteStatus `json:"status,omitempty"` } @@ -111,14 +114,18 @@ type HTTPRouteSpec struct { // Support: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 Hostnames []Hostname `json:"hostnames,omitempty"` // Rules are a list of HTTP matchers, filters and actions. // // +optional + // +listType=atomic + // // +kubebuilder:validation:MaxItems=16 // +kubebuilder:default={{matches: {{path: {type: "PathPrefix", value: "/"}}}}} + // +kubebuilder:validation:XValidation:message="While 16 rules and 64 matches per rule are allowed, the total number of matches across all rules in a route must be less than 128",rule="(self.size() > 0 ? self[0].matches.size() : 0) + (self.size() > 1 ? self[1].matches.size() : 0) + (self.size() > 2 ? self[2].matches.size() : 0) + (self.size() > 3 ? self[3].matches.size() : 0) + (self.size() > 4 ? self[4].matches.size() : 0) + (self.size() > 5 ? self[5].matches.size() : 0) + (self.size() > 6 ? self[6].matches.size() : 0) + (self.size() > 7 ? self[7].matches.size() : 0) + (self.size() > 8 ? self[8].matches.size() : 0) + (self.size() > 9 ? self[9].matches.size() : 0) + (self.size() > 10 ? self[10].matches.size() : 0) + (self.size() > 11 ? self[11].matches.size() : 0) + (self.size() > 12 ? self[12].matches.size() : 0) + (self.size() > 13 ? self[13].matches.size() : 0) + (self.size() > 14 ? self[14].matches.size() : 0) + (self.size() > 15 ? self[15].matches.size() : 0) <= 128" Rules []HTTPRouteRule `json:"rules,omitempty"` } @@ -132,6 +139,12 @@ type HTTPRouteSpec struct { // +kubebuilder:validation:XValidation:message="Within backendRefs, when using RequestRedirect filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.backendRefs) && self.backendRefs.exists_one(b, (has(b.filters) && b.filters.exists_one(f, has(f.requestRedirect) && has(f.requestRedirect.path) && f.requestRedirect.path.type == 'ReplacePrefixMatch' && has(f.requestRedirect.path.replacePrefixMatch))) )) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? false : true) : true" // +kubebuilder:validation:XValidation:message="Within backendRefs, When using URLRewrite filter with path.replacePrefixMatch, exactly one PathPrefix match must be specified",rule="(has(self.backendRefs) && self.backendRefs.exists_one(b, (has(b.filters) && b.filters.exists_one(f, has(f.urlRewrite) && has(f.urlRewrite.path) && f.urlRewrite.path.type == 'ReplacePrefixMatch' && has(f.urlRewrite.path.replacePrefixMatch))) )) ? ((size(self.matches) != 1 || !has(self.matches[0].path) || self.matches[0].path.type != 'PathPrefix') ? false : true) : true" type HTTPRouteRule struct { + // Name is the name of the route rule. This name MUST be unique within a Route if it is set. + // + // Support: Extended + // +optional + Name *SectionName `json:"name,omitempty"` + // Matches define conditions used for matching the rule against incoming // HTTP requests. Each match is independent, i.e. this rule will be matched // if **any** one of the matches is satisfied. @@ -190,7 +203,8 @@ type HTTPRouteRule struct { // parent a request is coming from, a HTTP 404 status code MUST be returned. // // +optional - // +kubebuilder:validation:MaxItems=8 + // +listType=atomic + // +kubebuilder:validation:MaxItems=64 // +kubebuilder:default={{path:{ type: "PathPrefix", value: "/"}}} Matches []HTTPRouteMatch `json:"matches,omitempty"` @@ -201,7 +215,7 @@ type HTTPRouteRule struct { // they are specified. 
// // Implementations MAY choose to implement this ordering strictly, rejecting - // any combination or order of filters that can not be supported. If implementations + // any combination or order of filters that cannot be supported. If implementations // choose a strict interpretation of filter ordering, they MUST clearly document // that behavior. // @@ -223,7 +237,7 @@ type HTTPRouteRule struct { // // All filters are expected to be compatible with each other except for the // URLRewrite and RequestRedirect filters, which may not be combined. If an - // implementation can not support other combinations of filters, they must clearly + // implementation cannot support other combinations of filters, they must clearly // document that limitation. In cases where incompatible or unsupported // filters are specified and cause the `Accepted` condition to be set to status // `False`, implementations may use the `IncompatibleFilters` reason to specify @@ -232,6 +246,7 @@ type HTTPRouteRule struct { // Support: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 // +kubebuilder:validation:XValidation:message="May specify either httpRouteFilterRequestRedirect or httpRouteFilterRequestRewrite, but not both",rule="!(self.exists(f, f.type == 'RequestRedirect') && self.exists(f, f.type == 'URLRewrite'))" // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" @@ -263,6 +278,11 @@ type HTTPRouteRule struct { // invalid, 50 percent of traffic must receive a 500. Implementations may // choose how that 50 percent is determined. // + // When a HTTPBackendRef refers to a Service that has no ready endpoints, + // implementations SHOULD return a 503 for requests to that backend instead. + // If an implementation chooses to do this, all of the above rules for 500 responses + // MUST also apply for responses that return a 503. + // // Support: Core for Kubernetes Service // // Support: Extended for Kubernetes ServiceImport @@ -272,6 +292,7 @@ type HTTPRouteRule struct { // Support for weight: Core // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 BackendRefs []HTTPBackendRef `json:"backendRefs,omitempty"` @@ -280,9 +301,16 @@ type HTTPRouteRule struct { // Support: Extended // // +optional - // Timeouts *HTTPRouteTimeouts `json:"timeouts,omitempty"` + // Retry defines the configuration for when to retry an HTTP request. + // + // Support: Extended + // + // +optional + // + Retry *HTTPRouteRetry `json:"retry,omitempty"` + // SessionPersistence defines and configures session persistence // for the route rule. // @@ -316,7 +344,8 @@ type HTTPRouteTimeouts struct { // request stream has been received instead of immediately after the transaction is // initiated by the client. // - // When this field is unspecified, request timeout behavior is implementation-specific. + // The value of Request is a Gateway API Duration string as defined by GEP-2257. When this + // field is unspecified, request timeout behavior is implementation-specific. // // Support: Extended // @@ -336,8 +365,10 @@ type HTTPRouteTimeouts struct { // may result in more than one call from the gateway to the destination backend, // for example, if automatic retries are supported. // - // Because the Request timeout encompasses the BackendRequest timeout, the value of - // BackendRequest must be <= the value of Request timeout. 
+ // The value of BackendRequest must be a Gateway API Duration string as defined by + // GEP-2257. When this field is unspecified, its behavior is implementation-specific; + // when specified, the value of BackendRequest must be no more than the value of the + // Request timeout (since the Request timeout encompasses the BackendRequest timeout). // // Support: Extended // @@ -345,6 +376,96 @@ type HTTPRouteTimeouts struct { BackendRequest *Duration `json:"backendRequest,omitempty"` } +// HTTPRouteRetry defines retry configuration for an HTTPRoute. +// +// Implementations SHOULD retry on connection errors (disconnect, reset, timeout, +// TCP failure) if a retry stanza is configured. +type HTTPRouteRetry struct { + // Codes defines the HTTP response status codes for which a backend request + // should be retried. + // + // Support: Extended + // + // +optional + // +listType=atomic + Codes []HTTPRouteRetryStatusCode `json:"codes,omitempty"` + + // Attempts specifies the maximum number of times an individual request + // from the gateway to a backend should be retried. + // + // If the maximum number of retries has been attempted without a successful + // response from the backend, the Gateway MUST return an error. + // + // When this field is unspecified, the number of times to attempt to retry + // a backend request is implementation-specific. + // + // Support: Extended + // + // +optional + Attempts *int `json:"attempts,omitempty"` + + // Backoff specifies the minimum duration a Gateway should wait between + // retry attempts and is represented in Gateway API Duration formatting. + // + // For example, setting the `rules[].retry.backoff` field to the value + // `100ms` will cause a backend request to first be retried approximately + // 100 milliseconds after timing out or receiving a response code configured + // to be retryable. + // + // An implementation MAY use an exponential or alternative backoff strategy + // for subsequent retry attempts, MAY cap the maximum backoff duration to + // some amount greater than the specified minimum, and MAY add arbitrary + // jitter to stagger requests, as long as unsuccessful backend requests are + // not retried before the configured minimum duration. + // + // If a Request timeout (`rules[].timeouts.request`) is configured on the + // route, the entire duration of the initial request and any retry attempts + // MUST not exceed the Request timeout duration. If any retry attempts are + // still in progress when the Request timeout duration has been reached, + // these SHOULD be canceled if possible and the Gateway MUST immediately + // return a timeout error. + // + // If a BackendRequest timeout (`rules[].timeouts.backendRequest`) is + // configured on the route, any retry attempts which reach the configured + // BackendRequest timeout duration without a response SHOULD be canceled if + // possible and the Gateway should wait for at least the specified backoff + // duration before attempting to retry the backend request again. + // + // If a BackendRequest timeout is _not_ configured on the route, retry + // attempts MAY time out after an implementation default duration, or MAY + // remain pending until a configured Request timeout or implementation + // default duration for total request time is reached. + // + // When this field is unspecified, the time to wait between retry attempts + // is implementation-specific. 
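Taken together, the retry fields compose like this; a hypothetical rule sketch using the types added in this file (the values and function name are ours): retry 503 responses up to 3 times, waiting at least 100ms (a GEP-2257 duration string) between attempts.

package example

import gwapiv1 "sigs.k8s.io/gateway-api/apis/v1"

func exampleRetryRule() gwapiv1.HTTPRouteRule {
	attempts := 3
	backoff := gwapiv1.Duration("100ms")
	return gwapiv1.HTTPRouteRule{
		Retry: &gwapiv1.HTTPRouteRetry{
			// 503 is one of the codes implementations MUST support as retryable.
			Codes:    []gwapiv1.HTTPRouteRetryStatusCode{503},
			Attempts: &attempts,
			Backoff:  &backoff,
		},
	}
}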
+ // + // Support: Extended + // + // +optional + Backoff *Duration `json:"backoff,omitempty"` +} + +// HTTPRouteRetryStatusCode defines an HTTP response status code for +// which a backend request should be retried. +// +// Implementations MUST support the following status codes as retryable: +// +// * 500 +// * 502 +// * 503 +// * 504 +// +// Implementations MAY support specifying additional discrete values in the +// 500-599 range. +// +// Implementations MAY support specifying discrete values in the 400-499 range, +// which are often inadvisable to retry. +// +// +kubebuilder:validation:Minimum:=400 +// +kubebuilder:validation:Maximum:=599 +// +type HTTPRouteRetryStatusCode int + // PathMatchType specifies the semantics of how HTTP paths should be compared. // Valid PathMatchType values, along with their support levels, are: // @@ -374,7 +495,7 @@ const ( PathMatchExact PathMatchType = "Exact" // Matches based on a URL path prefix split by `/`. Matching is - // case sensitive and done on a path element by element basis. A + // case-sensitive and done on a path element by element basis. A // path element refers to the list of labels in the path split by // the `/` separator. When specified, a trailing `/` is ignored. // @@ -483,7 +604,7 @@ type HTTPHeaderMatch struct { Type *HeaderMatchType `json:"type,omitempty"` // Name is the name of the HTTP Header to be matched. Name matching MUST be - // case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + // case-insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). // // If multiple entries specify equivalent header names, only the first // entry with an equivalent name MUST be considered for a match. Subsequent @@ -496,12 +617,14 @@ type HTTPHeaderMatch struct { // Generally, proxies should follow the guidance from the RFC: // https://www.rfc-editor.org/rfc/rfc7230.html#section-3.2.2 regarding // processing a repeated header, with special handling for "Set-Cookie". + // +required Name HTTPHeaderName `json:"name"` // Value is the value of HTTP Header to be matched. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=4096 + // +required Value string `json:"value"` } @@ -563,12 +686,14 @@ type HTTPQueryParamMatch struct { // // Users SHOULD NOT route traffic based on repeated query params to guard // themselves against potential differences in the implementations. + // +required Name HTTPHeaderName `json:"name"` // Value is the value of HTTP query param to be matched. 
// // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=1024 + // +required Value string `json:"value"` } @@ -588,6 +713,9 @@ type HTTPQueryParamMatch struct { // +kubebuilder:validation:Enum=GET;HEAD;POST;PUT;DELETE;CONNECT;OPTIONS;TRACE;PATCH type HTTPMethod string +// +kubebuilder:validation:Enum=GET;HEAD;POST;PUT;DELETE;CONNECT;OPTIONS;TRACE;PATCH;* +type HTTPMethodWithWildcard string + const ( HTTPMethodGet HTTPMethod = "GET" HTTPMethodHead HTTPMethod = "HEAD" @@ -674,6 +802,10 @@ type HTTPRouteMatch struct { // +kubebuilder:validation:XValidation:message="filter.requestRedirect must be specified for RequestRedirect filter.type",rule="!(!has(self.requestRedirect) && self.type == 'RequestRedirect')" // +kubebuilder:validation:XValidation:message="filter.urlRewrite must be nil if the filter.type is not URLRewrite",rule="!(has(self.urlRewrite) && self.type != 'URLRewrite')" // +kubebuilder:validation:XValidation:message="filter.urlRewrite must be specified for URLRewrite filter.type",rule="!(!has(self.urlRewrite) && self.type == 'URLRewrite')" +// +// +// +// // +kubebuilder:validation:XValidation:message="filter.extensionRef must be nil if the filter.type is not ExtensionRef",rule="!(has(self.extensionRef) && self.type != 'ExtensionRef')" // +kubebuilder:validation:XValidation:message="filter.extensionRef must be specified for ExtensionRef filter.type",rule="!(!has(self.extensionRef) && self.type == 'ExtensionRef')" type HTTPRouteFilter struct { @@ -712,6 +844,8 @@ type HTTPRouteFilter struct { // // +unionDiscriminator // +kubebuilder:validation:Enum=RequestHeaderModifier;ResponseHeaderModifier;RequestMirror;RequestRedirect;URLRewrite;ExtensionRef + // + // +required Type HTTPRouteFilterType `json:"type"` // RequestHeaderModifier defines a schema for a filter that modifies request @@ -741,6 +875,8 @@ type HTTPRouteFilter struct { // Support: Extended // // +optional + // + // +kubebuilder:validation:XValidation:message="Only one of percent or fraction may be specified in HTTPRequestMirrorFilter",rule="!(has(self.percent) && has(self.fraction))" RequestMirror *HTTPRequestMirrorFilter `json:"requestMirror,omitempty"` // RequestRedirect defines a schema for a filter that responds to the @@ -758,6 +894,28 @@ type HTTPRouteFilter struct { // +optional URLRewrite *HTTPURLRewriteFilter `json:"urlRewrite,omitempty"` + // CORS defines a schema for a filter that responds to the + // cross-origin request based on HTTP response header. + // + // Support: Extended + // + // +optional + // + CORS *HTTPCORSFilter `json:"cors,omitempty"` + + // ExternalAuth configures settings related to sending request details + // to an external auth service. The external service MUST authenticate + // the request, and MAY authorize the request as well. + // + // If there is any problem communicating with the external service, + // this filter MUST fail closed. + // + // Support: Extended + // + // +optional + // + ExternalAuth *HTTPExternalAuthFilter `json:"externalAuth,omitempty"` + // ExtensionRef is an optional, implementation-specific extension to the // "filter" behavior. For example, resource "myroutefilter" in group // "networking.example.net"). ExtensionRef MUST NOT be used for core and @@ -820,6 +978,27 @@ const ( // Support in HTTPBackendRef: Extended HTTPRouteFilterRequestMirror HTTPRouteFilterType = "RequestMirror" + // HTTPRouteFilterCORS can be used to add CORS headers to an + // HTTP response before it is sent to the client. 
+ // + // Support in HTTPRouteRule: Extended + // + // Support in HTTPBackendRef: Extended + // + HTTPRouteFilterCORS HTTPRouteFilterType = "CORS" + + // HTTPRouteFilterExternalAuth can be used to configure a Gateway implementation + // to call out to an external Auth server, which MUST perform Authentication + // and MAY perform Authorization on the matched request before the request + // is forwarded to the backend. + // + // Support in HTTPRouteRule: Extended + // + // Feature Name: HTTPRouteExternalAuth + // + // + HTTPRouteFilterExternalAuth HTTPRouteFilterType = "ExternalAuth" + // HTTPRouteFilterExtensionRef should be used for configuring custom // HTTP filters. // @@ -832,28 +1011,30 @@ const ( // HTTPHeader represents an HTTP Header name and value as defined by RFC 7230. type HTTPHeader struct { // Name is the name of the HTTP Header to be matched. Name matching MUST be - // case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). + // case-insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). // // If multiple entries specify equivalent header names, the first entry with // an equivalent name MUST be considered for a match. Subsequent entries // with an equivalent header name MUST be ignored. Due to the // case-insensitivity of header names, "foo" and "Foo" are considered // equivalent. + // +required Name HTTPHeaderName `json:"name"` // Value is the value of HTTP Header to be matched. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=4096 + // +required Value string `json:"value"` } // HTTPHeaderFilter defines a filter that modifies the headers of an HTTP -// request or response. Only one action for a given header name is permitted. -// Filters specifying multiple actions of the same or different type for any one -// header name are invalid and will be rejected by CRD validation. -// Configuration to set or add multiple values for a header must use RFC 7230 -// header value formatting, separating each value with a comma. +// request or response. Only one action for a given header name is +// permitted. Filters specifying multiple actions of the same or different +// type for any one header name are invalid. Configuration to set or add +// multiple values for a header must use RFC 7230 header value formatting, +// separating each value with a comma. type HTTPHeaderFilter struct { // Set overwrites the request with the given header (name, value) // before the action. @@ -963,6 +1144,7 @@ type HTTPPathModifier struct { // Reason of `UnsupportedValue`. // // +kubebuilder:validation:Enum=ReplaceFullPath;ReplacePrefixMatch + // +required Type HTTPPathModifierType `json:"type"` // ReplaceFullPath specifies the value with which to replace the full path @@ -1070,6 +1252,9 @@ type HTTPRequestRedirectFilter struct { // Support: Extended // // +optional + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 Port *PortNumber `json:"port,omitempty"` // StatusCode is the HTTP status code to be used in response. @@ -1136,7 +1321,438 @@ type HTTPRequestMirrorFilter struct { // Support: Extended for Kubernetes Service // // Support: Implementation-specific for any other resource + // +required BackendRef BackendObjectReference `json:"backendRef"` + + // Percent represents the percentage of requests that should be + // mirrored to BackendRef. Its minimum value is 0 (indicating 0% of + // requests) and its maximum value is 100 (indicating 100% of requests). 
+ // + // Only one of Fraction or Percent may be specified. If neither field + // is specified, 100% of requests will be mirrored. + // + // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=100 + Percent *int32 `json:"percent,omitempty"` + + // Fraction represents the fraction of requests that should be + // mirrored to BackendRef. + // + // Only one of Fraction or Percent may be specified. If neither field + // is specified, 100% of requests will be mirrored. + // + // +optional + Fraction *Fraction `json:"fraction,omitempty"` +} + +// HTTPCORSFilter defines a filter that configures Cross-Origin Request +// Sharing (CORS). +type HTTPCORSFilter struct { + // AllowOrigins indicates whether the response can be shared with the requested + // resource from the given `Origin`. + // + // The `Origin` consists of a scheme and a host, with an optional port, and + // takes the form `<scheme>://<host>(:<port>)`. + // + // Valid values for scheme are: `http` and `https`. + // + // Valid values for port are any integer between 1 and 65535 (the list of + // available TCP/UDP ports). Note that, if not included, port `80` is + // assumed for `http` scheme origins, and port `443` is assumed for `https` + // origins. This may affect origin matching. + // + // The host part of the origin may contain the wildcard character `*`. These + // wildcard characters behave as follows: + // + // * `*` is a greedy match to the _left_, including any number of + // DNS labels to the left of its position. This also means that + // `*` will include any number of period `.` characters to the + // left of its position. + // * A wildcard by itself matches all hosts. + // + // An origin value that includes _only_ the `*` character indicates requests + // from all `Origin`s are allowed. + // + // When the `AllowOrigins` field is configured with multiple origins, it + // means the server supports clients from multiple origins. If the request + // `Origin` matches the configured allowed origins, the gateway must return + // the given `Origin`, setting the value of the + // `Access-Control-Allow-Origin` response header to the same value as the + // `Origin` header provided by the client. + // + // The status code of a successful response to a "preflight" request is + // always an OK status (i.e., 204 or 200). + // + // If the request `Origin` does not match the configured allowed origins, + // the gateway returns a 204/200 response but doesn't set the relevant + // cross-origin response headers. Alternatively, the gateway may respond + // with a 403 status to deny the "preflight" request, likewise omitting + // the CORS headers. The cross-origin request then fails on the client side. + // Therefore, the client doesn't attempt the actual cross-origin request. + // + // The `Access-Control-Allow-Origin` response header can only use the `*` + // wildcard as its value when the `AllowCredentials` field is false or omitted. + // + // When the `AllowCredentials` field is true and the `AllowOrigins` field is + // specified with the `*` wildcard, the gateway must return a single origin + // in the value of the `Access-Control-Allow-Origin` response header, + // instead of specifying the `*` wildcard. The value of the header + // `Access-Control-Allow-Origin` is the same as the `Origin` header provided by + // the client.
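For illustration, a filter honoring the wildcard and credentials rules above might be constructed like this (a sketch using the types in this file; the function name and values are ours):

package example

import (
	"k8s.io/utils/ptr"

	gwapiv1 "sigs.k8s.io/gateway-api/apis/v1"
)

func exampleCORSFilter() gwapiv1.HTTPRouteFilter {
	return gwapiv1.HTTPRouteFilter{
		Type: gwapiv1.HTTPRouteFilterCORS,
		CORS: &gwapiv1.HTTPCORSFilter{
			// `*` is greedy to the left: this matches https://a.example.com
			// and https://a.b.example.com, on the assumed default port 443.
			AllowOrigins: []gwapiv1.CORSOrigin{"https://*.example.com"},
			// With credentials enabled, the gateway must echo the request
			// Origin rather than `*` in Access-Control-Allow-Origin.
			AllowCredentials: ptr.To(true),
			AllowMethods:     []gwapiv1.HTTPMethodWithWildcard{"GET", "POST"},
		},
	}
}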
+ // + // Support: Extended + // +listType=set + // +kubebuilder:validation:MaxItems=64 + // +kubebuilder:validation:XValidation:message="AllowOrigins cannot contain '*' alongside other origins",rule="!('*' in self && self.size() > 1)" + // +optional + AllowOrigins []CORSOrigin `json:"allowOrigins,omitempty"` + + // AllowCredentials indicates whether the actual cross-origin request is + // allowed to include credentials. + // + // When set to true, the gateway will include the `Access-Control-Allow-Credentials` + // response header with value true (case-sensitive). + // + // When set to false or omitted, the gateway will omit the header + // `Access-Control-Allow-Credentials` entirely (this is the standard CORS + // behavior). + // + // Support: Extended + // + // +optional + AllowCredentials *bool `json:"allowCredentials,omitempty"` + + // AllowMethods indicates which HTTP methods are supported for accessing the + // requested resource. + // + // Valid values are any method defined by RFC9110, along with the special + // value `*`, which indicates that all HTTP methods are allowed. + // + // Method names are case-sensitive, so these values are also case-sensitive. + // (See https://www.rfc-editor.org/rfc/rfc2616#section-5.1.1) + // + // Multiple method names in the value of the `Access-Control-Allow-Methods` + // response header are separated by a comma (","). + // + // A CORS-safelisted method is a method that is `GET`, `HEAD`, or `POST`. + // (See https://fetch.spec.whatwg.org/#cors-safelisted-method) The + // CORS-safelisted methods are always allowed, regardless of whether they + // are specified in the `AllowMethods` field. + // + // When the `AllowMethods` field is configured with one or more methods, the + // gateway must return the `Access-Control-Allow-Methods` response header + // whose value is the content of the `AllowMethods` field. + // + // If the HTTP method of the `Access-Control-Request-Method` request header + // is not included in the list of methods specified by the response header + // `Access-Control-Allow-Methods`, an error will occur on the client + // side. + // + // The `Access-Control-Allow-Methods` response header can only use the `*` + // wildcard as its value when the `AllowCredentials` field is false or omitted. + // + // When the `AllowCredentials` field is true and the `AllowMethods` field is + // specified with the `*` wildcard, the gateway must specify one HTTP method + // in the value of the Access-Control-Allow-Methods response header. The + // value of the header `Access-Control-Allow-Methods` is the same as the + // `Access-Control-Request-Method` header provided by the client. If the + // header `Access-Control-Request-Method` is not included in the request, + // the gateway will omit the `Access-Control-Allow-Methods` response header, + // instead of specifying the `*` wildcard. A Gateway implementation may + // choose to add implementation-specific default methods. + // + // Support: Extended + // + // +listType=set + // +kubebuilder:validation:MaxItems=9 + // +kubebuilder:validation:XValidation:message="AllowMethods cannot contain '*' alongside other methods",rule="!('*' in self && self.size() > 1)" + // +optional + AllowMethods []HTTPMethodWithWildcard `json:"allowMethods,omitempty"` + + // AllowHeaders indicates which HTTP request headers are supported for + // accessing the requested resource. + // + // Header names are not case-sensitive.
+ // + // Multiple header names in the value of the `Access-Control-Allow-Headers` + // response header are separated by a comma (","). + // + // When the `AllowHeaders` field is configured with one or more headers, the + // gateway must return the `Access-Control-Allow-Headers` response header + // whose value is the content of the `AllowHeaders` field. + // + // If any header name in the `Access-Control-Request-Headers` request header + // is not included in the list of header names specified by the response + // header `Access-Control-Allow-Headers`, an error will occur on the + // client side. + // + // If the client does not recognize any header name in the + // `Access-Control-Allow-Headers` response header, an error will also occur + // on the client side. + // + // A wildcard indicates that requests with any HTTP headers are allowed. + // The `Access-Control-Allow-Headers` response header can only use the `*` + // wildcard as its value when the `AllowCredentials` field is false or omitted. + // + // When the `AllowCredentials` field is true and the `AllowHeaders` field is + // specified with the `*` wildcard, the gateway must specify one or more + // HTTP headers in the value of the `Access-Control-Allow-Headers` response + // header. The value of the header `Access-Control-Allow-Headers` is the same as + // the `Access-Control-Request-Headers` header provided by the client. If + // the header `Access-Control-Request-Headers` is not included in the + // request, the gateway will omit the `Access-Control-Allow-Headers` + // response header, instead of specifying the `*` wildcard. A Gateway + // implementation may choose to add implementation-specific default headers. + // + // Support: Extended + // + // +listType=set + // +kubebuilder:validation:MaxItems=64 + // +optional + AllowHeaders []HTTPHeaderName `json:"allowHeaders,omitempty"` + + // ExposeHeaders indicates which HTTP response headers can be exposed + // to client-side scripts in response to a cross-origin request. + // + // A CORS-safelisted response header is an HTTP header in a CORS response + // that is considered safe to expose to client scripts. + // The CORS-safelisted response headers include the following headers: + // `Cache-Control` + // `Content-Language` + // `Content-Length` + // `Content-Type` + // `Expires` + // `Last-Modified` + // `Pragma` + // (See https://fetch.spec.whatwg.org/#cors-safelisted-response-header-name) + // The CORS-safelisted response headers are exposed to the client by default. + // + // When an HTTP header name is specified using the `ExposeHeaders` field, + // this additional header will be exposed as part of the response to the + // client. + // + // Header names are not case-sensitive. + // + // Multiple header names in the value of the `Access-Control-Expose-Headers` + // response header are separated by a comma (","). + // + // A wildcard indicates that all response headers are exposed + // to clients. The `Access-Control-Expose-Headers` response header can only + // use the `*` wildcard as its value when the `AllowCredentials` field is false or omitted. + // + // Support: Extended + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxItems=64 + ExposeHeaders []HTTPHeaderName `json:"exposeHeaders,omitempty"` + + // MaxAge indicates the duration (in seconds) for the client to cache the + // results of a "preflight" request.
+ // + // The information provided by the `Access-Control-Allow-Methods` and + // `Access-Control-Allow-Headers` response headers can be cached by the + // client until the time specified by `Access-Control-Max-Age` elapses. + // + // The default value of the `Access-Control-Max-Age` response header is 5 + // (seconds). + // + // +optional + // +kubebuilder:default=5 + // +kubebuilder:validation:Minimum=1 + MaxAge int32 `json:"maxAge,omitempty"` +} + +// HTTPRouteExternalAuthProtocol specifies what protocol should be used +// for communicating with an external authorization server. +// +// Valid values are supplied as constants below. +type HTTPRouteExternalAuthProtocol string + +const ( + HTTPRouteExternalAuthGRPCProtocol HTTPRouteExternalAuthProtocol = "GRPC" + HTTPRouteExternalAuthHTTPProtocol HTTPRouteExternalAuthProtocol = "HTTP" +) + +// HTTPExternalAuthFilter defines a filter that modifies requests by sending +// request details to an external authorization server. +// +// Support: Extended +// Feature Name: HTTPRouteExternalAuth +// +kubebuilder:validation:XValidation:message="grpc must be specified when protocol is set to 'GRPC'",rule="self.protocol == 'GRPC' ? has(self.grpc) : true" +// +kubebuilder:validation:XValidation:message="protocol must be 'GRPC' when grpc is set",rule="has(self.grpc) ? self.protocol == 'GRPC' : true" +// +kubebuilder:validation:XValidation:message="http must be specified when protocol is set to 'HTTP'",rule="self.protocol == 'HTTP' ? has(self.http) : true" +// +kubebuilder:validation:XValidation:message="protocol must be 'HTTP' when http is set",rule="has(self.http) ? self.protocol == 'HTTP' : true" +type HTTPExternalAuthFilter struct { + // ExternalAuthProtocol describes which protocol to use when communicating with an + // ext_authz authorization server. + // + // When this is set to GRPC, each backend must use the Envoy ext_authz protocol + // on the port specified in `backendRefs`. Requests and responses are defined + // in the protobufs explained at: + // https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/auth/v3/external_auth.proto + // + // When this is set to HTTP, each backend must respond with a `200` status + // code on a successful authorization. Any other code is considered + // an authorization failure. + // + // Feature Names: + // GRPC Support - HTTPRouteExternalAuthGRPC + // HTTP Support - HTTPRouteExternalAuthHTTP + // + // +unionDiscriminator + // +required + // +kubebuilder:validation:Enum=HTTP;GRPC + ExternalAuthProtocol HTTPRouteExternalAuthProtocol `json:"protocol,omitempty"` + + // BackendRef is a reference to a backend to send authorization + // requests to. + // + // The backend must speak the selected protocol (GRPC or HTTP) on the + // referenced port. + // + // If the backend service requires TLS, use BackendTLSPolicy to tell the + // implementation to supply the TLS details to be used to connect to that + // backend. + // + // +required + BackendRef BackendObjectReference `json:"backendRef,omitempty"` + + // GRPCAuthConfig contains configuration for communication with ext_authz + // protocol-speaking backends. + // + // If unset, implementations must assume the default behavior for each + // included field is intended. + // + // +optional + GRPCAuthConfig *GRPCAuthConfig `json:"grpc,omitempty"` + + // HTTPAuthConfig contains configuration for communication with HTTP-speaking + // backends. + // + // If unset, implementations must assume the default behavior for each + // included field is intended.
+ // + // +optional + HTTPAuthConfig *HTTPAuthConfig `json:"http,omitempty"` + + // ForwardBody controls whether requests to the authorization server should include + // the body of the client request and, if so, how big that body is allowed + // to be. + // + // It is expected that implementations will buffer the request body up to + // `forwardBody.maxSize` bytes. Bodies over that size must be rejected with a + // 4xx series error (413 or 403 are common examples), and processing of the + // filter must fail. + // + // If unset, or `forwardBody.maxSize` is set to `0`, then the body will not + // be forwarded. + // + // Feature Name: HTTPRouteExternalAuthForwardBody + // + // + // +optional + ForwardBody *ForwardBodyConfig `json:"forwardBody,omitempty"` +} + +// GRPCAuthConfig contains configuration for communication with Auth server +// backends that speak Envoy's ext_authz gRPC protocol. +// +// Requests and responses are defined in the protobufs explained at: +// https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/auth/v3/external_auth.proto +type GRPCAuthConfig struct { + // AllowedRequestHeaders specifies what headers from the client request + // will be sent to the authorization server. + // + // If this list is empty, then all headers must be sent. + // + // If the list has entries, only those entries must be sent. + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxLength=64 + AllowedRequestHeaders []string `json:"allowedHeaders,omitempty"` +} + +// HTTPAuthConfig contains configuration for communication with HTTP-speaking +// backends. +type HTTPAuthConfig struct { + // Path sets the prefix that paths from the client request will have added + // when forwarded to the authorization server. + // + // When empty or unspecified, no prefix is added. + // + // Valid values are the same as the "value" regex for path values in the `match` + // stanza, and the validation regex will screen out invalid paths in the same way. + // Even with the validation, implementations MUST sanitize this input before using it + // directly. + // + // +optional + // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:Pattern="^(?:[-A-Za-z0-9/._~!$&'()*+,;=:@]|[%][0-9a-fA-F]{2})+$" + Path string `json:"path,omitempty"` + + // AllowedRequestHeaders specifies what additional headers from the client request + // will be sent to the authorization server. + // + // The following headers must always be sent to the authorization server, + // regardless of this setting: + // + // * `Host` + // * `Method` + // * `Path` + // * `Content-Length` + // * `Authorization` + // + // If this list is empty, then only those headers must be sent. + // + // Note that `Content-Length` has a special behavior, in that the length + // sent must be correct for the actual request to the external authorization + // server - that is, it must reflect the actual number of bytes sent in the + // body of the request to the authorization server. + // + // So if the `forwardBody` stanza is unset, or `forwardBody.maxSize` is set + // to `0`, then `Content-Length` must be `0`. If `forwardBody.maxSize` is set + // to anything other than `0`, then the `Content-Length` of the authorization + // request must be set to the actual number of bytes forwarded.
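The Content-Length rule above is the subtle part. An illustrative, non-normative sketch of building the authorization request (all names are ours; whether oversized bodies are rejected or truncated is governed by the field docs above, so this only shows the length bookkeeping):

package example

import (
	"bytes"
	"context"
	"io"
	"net/http"
)

// buildAuthRequest buffers at most maxSize bytes of the client body and makes
// the authorization request's Content-Length reflect exactly what is forwarded.
func buildAuthRequest(ctx context.Context, authURL string, clientBody io.Reader, maxSize uint16) (*http.Request, error) {
	var body []byte
	if maxSize > 0 {
		buf := make([]byte, maxSize)
		n, err := io.ReadFull(clientBody, buf)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			return nil, err
		}
		body = buf[:n]
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, authURL, bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	// len(body) is 0 when forwardBody is unset or maxSize == 0, matching the
	// requirement that Content-Length be 0 in that case.
	req.ContentLength = int64(len(body))
	return req, nil
}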
+ // + // +optional + // +listType=set + // +kubebuilder:validation:MaxLength=64 + AllowedRequestHeaders []string `json:"allowedHeaders,omitempty"` + + // AllowedResponseHeaders specifies what headers from the authorization response + // will be copied into the request to the backend. + // + // If this list is empty, then all headers from the authorization server + // except Authority or Host must be copied. + // + // +optional + // +listType=set + // +kubebuilder:validation:MaxLength=64 + AllowedResponseHeaders []string `json:"allowedResponseHeaders,omitempty"` +} + +// ForwardBodyConfig configures whether requests to the authorization server should +// include the body of the client request and, if so, how big that body is allowed +// to be. +// +// If empty or unset, do not forward the body. +type ForwardBodyConfig struct { + // MaxSize specifies, in bytes, the largest body that will be buffered + // and sent to the authorization server. If the body size is larger than + // `maxSize`, then the body sent to the authorization server must be + // truncated to `maxSize` bytes. + // + // Experimental note: This behavior needs to be checked against + // various dataplanes; it may need to be changed. + // See https://github.com/kubernetes-sigs/gateway-api/pull/4001#discussion_r2291405746 + // for more. + // + // If 0, the body will not be sent to the authorization server. + // +optional + MaxSize uint16 `json:"maxSize,omitempty"` } // HTTPBackendRef defines how a HTTPRoute forwards a HTTP request. @@ -1215,9 +1831,9 @@ type HTTPBackendRef struct { // Filters field in HTTPRouteRule.) // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=16 // +kubebuilder:validation:XValidation:message="May specify either httpRouteFilterRequestRedirect or httpRouteFilterRequestRewrite, but not both",rule="!(self.exists(f, f.type == 'RequestRedirect') && self.exists(f, f.type == 'URLRewrite'))" - // +kubebuilder:validation:XValidation:message="May specify either httpRouteFilterRequestRedirect or httpRouteFilterRequestRewrite, but not both",rule="!(self.exists(f, f.type == 'RequestRedirect') && self.exists(f, f.type == 'URLRewrite'))" // +kubebuilder:validation:XValidation:message="RequestHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1" // +kubebuilder:validation:XValidation:message="ResponseHeaderModifier filter cannot be repeated",rule="self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1" diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go index 421572ace..414e39b94 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/object_reference_types.go @@ -27,12 +27,15 @@ package v1 type LocalObjectReference struct { // Group is the group of the referent. For example, "gateway.networking.k8s.io". // When unspecified or empty string, core API group is inferred. + // +required Group Group `json:"group"` // Kind is kind of the referent. For example "HTTPRoute" or "Service". + // +required Kind Kind `json:"kind"` // Name is the name of the referent. + // +required Name ObjectName `json:"name"` } @@ -60,6 +63,7 @@ type SecretObjectReference struct { Kind *Kind `json:"kind"` // Name is the name of the referent.
+ // +required Name ObjectName `json:"name"` // Namespace is the namespace of the referenced object. When unspecified, the local @@ -121,6 +125,7 @@ type BackendObjectReference struct { Kind *Kind `json:"kind,omitempty"` // Name is the name of the referent. + // +required Name ObjectName `json:"name"` // Namespace is the namespace of the backend. When unspecified, the local @@ -143,6 +148,8 @@ type BackendObjectReference struct { // resource or this field. // // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 Port *PortNumber `json:"port,omitempty"` } @@ -156,13 +163,16 @@ // on the containing object. type ObjectReference struct { // Group is the group of the referent. For example, "gateway.networking.k8s.io". - // When unspecified or empty string, core API group is inferred. + // When set to the empty string, core API group is inferred. + // +required Group Group `json:"group"` // Kind is kind of the referent. For example "ConfigMap" or "Service". + // +required Kind Kind `json:"kind"` // Name is the name of the referent. + // +required Name ObjectName `json:"name"` // Namespace is the namespace of the referenced object. When unspecified, the local diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/policy_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/policy_types.go new file mode 100644 index 000000000..552db9bf7 --- /dev/null +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/policy_types.go @@ -0,0 +1,279 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +const ( + // PolicyLabelKey is the label whose presence identifies a CRD that uses + // the Gateway API Policy attachment model. The value of the label SHOULD be one + // of the following: + // - A label value of "Inherited" indicates that this Policy is inheritable. + // An example of inheritable policy is one which, if applied at the Gateway + // level, would affect all attached HTTPRoutes and their respective + // Backends. + // - A label value of "Direct" indicates that the policy only affects the + // resource to which it is attached and does not affect its sub-resources. + PolicyLabelKey = "gateway.networking.k8s.io/policy" +) + +// LocalPolicyTargetReference identifies an API object to apply a direct or +// inherited policy to. This should be used as part of Policy resources +// that can target Gateway API resources. For more information on how this +// policy attachment model works, and a sample Policy resource, refer to +// the policy attachment documentation for Gateway API. +type LocalPolicyTargetReference struct { + // Group is the group of the target resource. + // +required + Group Group `json:"group"` + + // Kind is kind of the target resource. + // +required + Kind Kind `json:"kind"` + + // Name is the name of the target resource.
+ // +required + Name ObjectName `json:"name"` +} + +// NamespacedPolicyTargetReference identifies an API object to apply a direct or +// inherited policy to, potentially in a different namespace. This should only +// be used as part of Policy resources that need to be able to target resources +// in different namespaces. For more information on how this policy attachment +// model works, and a sample Policy resource, refer to the policy attachment +// documentation for Gateway API. +type NamespacedPolicyTargetReference struct { + // Group is the group of the target resource. + // +required + Group Group `json:"group"` + + // Kind is kind of the target resource. + // +required + Kind Kind `json:"kind"` + + // Name is the name of the target resource. + // +required + Name ObjectName `json:"name"` + + // Namespace is the namespace of the referent. When unspecified, the local + // namespace is inferred. Even when policy targets a resource in a different + // namespace, it MUST only apply to traffic originating from the same + // namespace as the policy. + // + // +optional + Namespace *Namespace `json:"namespace,omitempty"` +} + +// LocalPolicyTargetReferenceWithSectionName identifies an API object to apply a +// direct policy to. This should be used as part of Policy resources that can +// target single resources. For more information on how this policy attachment +// model works, and a sample Policy resource, refer to the policy attachment +// documentation for Gateway API. +// +// Note: This should only be used for direct policy attachment when references +// to SectionName are actually needed. In all other cases, +// LocalPolicyTargetReference should be used. +type LocalPolicyTargetReferenceWithSectionName struct { + LocalPolicyTargetReference `json:",inline"` + + // SectionName is the name of a section within the target resource. When + // unspecified, this targetRef targets the entire resource. In the following + // resources, SectionName is interpreted as the following: + // + // * Gateway: Listener name + // * HTTPRoute: HTTPRouteRule name + // * Service: Port name + // + // If a SectionName is specified, but does not exist on the targeted object, + // the Policy must fail to attach, and the policy implementation should record + // a `ResolvedRefs` or similar Condition in the Policy's status. + // + // +optional + SectionName *SectionName `json:"sectionName,omitempty"` +} + +// PolicyConditionType is a type of condition for a policy. This type should be +// used with a Policy resource Status.Conditions field. +type PolicyConditionType string + +// PolicyConditionReason is a reason for a policy condition. +type PolicyConditionReason string + +const ( + // PolicyConditionAccepted indicates whether the policy has been accepted or + // rejected by a targeted resource, and why. + // + // Possible reasons for this condition to be True are: + // + // * "Accepted" + // + // Possible reasons for this condition to be False are: + // + // * "Conflicted" + // * "Invalid" + // * "TargetNotFound" + // + PolicyConditionAccepted PolicyConditionType = "Accepted" + + // PolicyReasonAccepted is used with the "Accepted" condition when the policy + // has been accepted by the targeted resource. + PolicyReasonAccepted PolicyConditionReason = "Accepted" + + // PolicyReasonConflicted is used with the "Accepted" condition when the + // policy has not been accepted by a targeted resource because there is + // another policy that targets the same resource and a merge is not possible.
+ PolicyReasonConflicted PolicyConditionReason = "Conflicted" + + // PolicyReasonInvalid is used with the "Accepted" condition when the policy + // is syntactically or semantically invalid. + PolicyReasonInvalid PolicyConditionReason = "Invalid" + + // PolicyReasonTargetNotFound is used with the "Accepted" condition when the + // policy is attached to an invalid target resource. + PolicyReasonTargetNotFound PolicyConditionReason = "TargetNotFound" +) + +// PolicyAncestorStatus describes the status of a route with respect to an +// associated Ancestor. +// +// Ancestors refer to objects that are either the Target of a policy or above it +// in terms of object hierarchy. For example, if a policy targets a Service, the +// Policy's Ancestors are, in order, the Service, the HTTPRoute, the Gateway, and +// the GatewayClass. Almost always, in this hierarchy, the Gateway will be the most +// useful object to place Policy status on, so we recommend that implementations +// SHOULD use Gateway as the PolicyAncestorStatus object unless the designers +// have a _very_ good reason otherwise. +// +// In the context of policy attachment, the Ancestor is used to distinguish which +// resource results in a distinct application of this policy. For example, if a policy +// targets a Service, it may have a distinct result per attached Gateway. +// +// Policies targeting the same resource may have different effects depending on the +// ancestors of those resources. For example, different Gateways targeting the same +// Service may have different capabilities, especially if they have different underlying +// implementations. +// +// For example, in BackendTLSPolicy, the Policy attaches to a Service that is +// used as a backend in a HTTPRoute that is itself attached to a Gateway. +// In this case, the relevant object for status is the Gateway, and that is the +// ancestor object referred to in this status. +// +// Note that a parent is also an ancestor, so for objects where the parent is the +// relevant object for status, this struct SHOULD still be used. +// +// This struct is intended to be used in a slice that's effectively a map, +// with a composite key made up of the AncestorRef and the ControllerName. +type PolicyAncestorStatus struct { + // AncestorRef corresponds with a ParentRef in the spec that this + // PolicyAncestorStatus struct describes the status of. + // +required + AncestorRef ParentReference `json:"ancestorRef"` + + // ControllerName is a domain/path string that indicates the name of the + // controller that wrote this status. This corresponds with the + // controllerName field on GatewayClass. + // + // Example: "example.net/gateway-controller". + // + // The format of this field is DOMAIN "/" PATH, where DOMAIN and PATH are + // valid Kubernetes names + // (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). + // + // Controllers MUST populate this field when writing status. Controllers should ensure that + // entries to status populated with their ControllerName are cleaned up when they are no + // longer necessary. + // +required + ControllerName GatewayController `json:"controllerName"` + + // Conditions describes the status of the Policy with respect to the given Ancestor. + // + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. 
+ // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) + // + // + // + // +required + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=8 + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// PolicyStatus defines the common attributes that all Policies should include within +// their status. +type PolicyStatus struct { + // Ancestors is a list of ancestor resources (usually Gateways) that are + // associated with the policy, and the status of the policy with respect to + // each ancestor. When this policy attaches to a parent, the controller that + // manages the parent and the ancestors MUST add an entry to this list when + // the controller first sees the policy and SHOULD update the entry as + // appropriate when the relevant ancestor is modified. + // + // Note that choosing the relevant ancestor is left to the Policy designers; + // an important part of Policy design is designing the right object level at + // which to namespace this status. + // + // Note also that implementations MUST ONLY populate ancestor status for + // the Ancestor resources they are responsible for. Implementations MUST + // use the ControllerName field to uniquely identify the entries in this list + // that they are responsible for. + // + // Note that to achieve this, the list of PolicyAncestorStatus structs + // MUST be treated as a map with a composite key, made up of the AncestorRef + // and ControllerName fields combined. + // + // A maximum of 16 ancestors will be represented in this list. An empty list + // means the Policy is not relevant for any ancestors. + // + // If this slice is full, implementations MUST NOT add further entries. + // Instead they MUST consider the policy unimplementable and signal that + // on any related resources such as the ancestor that would be referenced + // here. For example, if this list was full on BackendTLSPolicy, no + // additional Gateways would be able to reference the Service targeted by + // the BackendTLSPolicy. 
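A minimal sketch of the merge-by-`type` rule spelled out above, assuming the standard `k8s.io/apimachinery` condition helper (the helper is not part of this PR):

```go
package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// setAccepted upserts the "Accepted" condition on a PolicyAncestorStatus
// conditions slice. meta.SetStatusCondition merges into an existing
// condition of the same Type rather than appending a duplicate, and leaves
// conditions owned by other controllers untouched.
func setAccepted(conds *[]metav1.Condition, observedGeneration int64, accepted bool) {
	cond := metav1.Condition{
		Type:               "Accepted",
		Status:             metav1.ConditionTrue,
		Reason:             "Accepted",
		Message:            "policy accepted by the ancestor",
		ObservedGeneration: observedGeneration, // metadata.generation at update time
	}
	if !accepted {
		cond.Status = metav1.ConditionFalse
		cond.Reason = "Invalid"
		cond.Message = "policy rejected"
	}
	meta.SetStatusCondition(conds, cond)
}
```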
+ // + // +required + // +listType=atomic + // +kubebuilder:validation:MaxItems=16 + Ancestors []PolicyAncestorStatus `json:"ancestors"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go index bed2cc8b8..eb8806837 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/shared_types.go @@ -86,6 +86,7 @@ type ParentReference struct { // Name is the name of the referent. // // Support: Core + // +required Name ObjectName `json:"name"` // SectionName is the name of a section within the target resource. In the @@ -148,9 +149,31 @@ type ParentReference struct { // Support: Extended // // +optional + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 Port *PortNumber `json:"port,omitempty"` } +// GatewayDefaultScope defines the set of default scopes that a Gateway +// can claim, for use in any Route type. At present, the only supported +// scopes are "All" and "None". "None" is a special scope which +// explicitly means that the Route MUST NOT be attached to any default +// Gateway. +// +// +kubebuilder:validation:Enum=All;None +type GatewayDefaultScope string + +const ( + // GatewayDefaultScopeAll indicates that a Gateway can claim absolutely + // any Route asking for a default Gateway. + GatewayDefaultScopeAll GatewayDefaultScope = "All" + + // GatewayDefaultScopeNone indicates that a Gateway MUST NOT claim + // any Route asking for a default Gateway. + GatewayDefaultScopeNone GatewayDefaultScope = "None" +) + // CommonRouteSpec defines the common attributes that all Routes MUST include // within their spec. type CommonRouteSpec struct { @@ -218,19 +241,34 @@ type CommonRouteSpec struct { // // // +optional + // +listType=atomic // +kubebuilder:validation:MaxItems=32 // // // // ParentRefs []ParentReference `json:"parentRefs,omitempty"` + + // UseDefaultGateways indicates the default Gateway scope to use for this + // Route. If unset (the default) or set to None, the Route will not be + // attached to any default Gateway; if set, it will be attached to any + // default Gateway supporting the named scope, subject to the usual rules + // about which Routes a Gateway is allowed to claim. + // + // Think carefully before using this functionality! The set of default + // Gateways supporting the requested scope can change over time without + // any notice to the Route author, and in many situations it will not be + // appropriate to request a default Gateway for a given Route -- for + // example, a Route with specific security requirements should almost + // certainly not use a default Gateway. + // + // +optional + // + UseDefaultGateways GatewayDefaultScope `json:"useDefaultGateways,omitempty"` } // PortNumber defines a network port. -// -// +kubebuilder:validation:Minimum=1 -// +kubebuilder:validation:Maximum=65535 -type PortNumber int32 +type PortNumber = int32 // BackendRef defines how a Route should forward a request to a Kubernetes // resource. @@ -436,6 +474,7 @@ const ( type RouteParentStatus struct { // ParentRef corresponds with a ParentRef in the spec that this // RouteParentStatus struct describes the status of. + // +required ParentRef ParentReference `json:"parentRef"`
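Assuming the `UseDefaultGateways` field lands as vendored above, a Route would opt in roughly like this (sketch only; hostname illustrative):

```go
package example

import gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"

// defaultGatewayRoute builds an HTTPRoute spec with no explicit parentRefs;
// attachment is instead requested from any default Gateway claiming the
// "All" scope.
func defaultGatewayRoute() gatewayv1.HTTPRouteSpec {
	return gatewayv1.HTTPRouteSpec{
		CommonRouteSpec: gatewayv1.CommonRouteSpec{
			UseDefaultGateways: gatewayv1.GatewayDefaultScopeAll,
		},
		Hostnames: []gatewayv1.Hostname{"app.example.com"},
	}
}
```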
// ControllerName is a domain/path string that indicates the name of the @@ -451,6 +490,7 @@ type RouteParentStatus struct { // Controllers MUST populate this field when writing status. Controllers should ensure that // entries to status populated with their ControllerName are cleaned up when they are no // longer necessary. + // +required ControllerName GatewayController `json:"controllerName"` // Conditions describes the status of the route with respect to the Gateway. @@ -469,14 +509,45 @@ type RouteParentStatus struct { // There are a number of cases where the "Accepted" condition may not be set // due to lack of controller visibility, that includes when: // - // * The Route refers to a non-existent parent. + // * The Route refers to a nonexistent parent. // * The Route is of a type that the controller does not support. // * The Route is in a namespace the controller does not have access to. // + // + // + // Notes for implementors: + // + // Conditions are a listType `map`, which means that they function like a + // map with a key of the `type` field _in the k8s apiserver_. + // + // This means that implementations must obey some rules when updating this + // section. + // + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version. + // * Implementations MUST NOT remove or reorder Conditions that they are not + // directly responsible for. For example, if an implementation sees a Condition + // with type `special.io/SomeField`, it MUST NOT remove, change or update that + // Condition. + // * Implementations MUST always _merge_ changes into Conditions of the same Type, + // rather than creating more than one Condition of the same Type. + // * Implementations MUST always update the `observedGeneration` field of the + // Condition to the `metadata.generation` of the Gateway at the time of update creation. + // * If the `observedGeneration` of a Condition is _greater than_ the value the + // implementation knows about, then it MUST NOT perform the update on that Condition, + // but must wait for a future reconciliation and status update. (The assumption is that + // the implementation's copy of the object is stale and an update will be re-triggered + // if relevant.) + // + // + // // +listType=map // +listMapKey=type // +kubebuilder:validation:MinItems=1 // +kubebuilder:validation:MaxItems=8 + // +required Conditions []metav1.Condition `json:"conditions,omitempty"` } @@ -498,6 +569,31 @@ type RouteStatus struct { // A maximum of 32 Gateways will be represented in this list. An empty list // means the route has not been attached to any Gateway. // + // + // Notes for implementors: + // + // While parents is not a listType `map`, this is because the list key is + // not scalar, and Kubernetes is unable to represent this. + // + // Parent status MUST be considered to be namespaced by the combination of + // the parentRef and controllerName fields, and implementations should keep + // the following rules in mind when updating this status: + // + // * Implementations MUST update only entries that have a matching value of + // `controllerName` for that implementation. + // * Implementations MUST NOT update entries with non-matching `controllerName` + // fields. + // * Implementations MUST treat each `parentRef` in the Route separately and + // update its status based on the relationship with that parent. + // * Implementations MUST perform a read-modify-write cycle on this field + // before modifying it. That is, when modifying this field, implementations + // must be confident they have fetched the most recent version of this field, + // and ensure that changes they make are on that recent version.
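The rules above amount to a keyed upsert over (parentRef, controllerName). A sketch (helper invented for illustration; equality via reflect.DeepEqual for brevity):

```go
package example

import (
	"reflect"

	gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
)

const ourController gatewayv1.GatewayController = "example.net/gateway-controller"

// upsertParentStatus updates only the entry owned by ourController for the
// given parentRef, leaving entries written by other controllers untouched.
func upsertParentStatus(status *gatewayv1.RouteStatus, entry gatewayv1.RouteParentStatus) {
	entry.ControllerName = ourController
	for i, p := range status.Parents {
		if p.ControllerName == ourController && reflect.DeepEqual(p.ParentRef, entry.ParentRef) {
			status.Parents[i] = entry // merge point for our own entry
			return
		}
	}
	status.Parents = append(status.Parents, entry)
}
```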
+ // + // + // + // +required + // +listType=atomic // +kubebuilder:validation:MaxItems=32 Parents []RouteParentStatus `json:"parents"` } @@ -535,6 +631,30 @@ type Hostname string // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` type PreciseHostname string +// AbsoluteURI represents a Uniform Resource Identifier (URI) as defined by RFC3986. +// +// The AbsoluteURI MUST NOT be a relative URI, and it MUST follow the URI syntax and +// encoding rules specified in RFC3986. The AbsoluteURI MUST include both a +// scheme (e.g., "http" or "spiffe") and a scheme-specific-part. URIs that +// include an authority MUST include a fully qualified domain name or +// IP address as the host. +// The regex below is taken from the regex section of RFC 3986, with a slight modification to require an absolute, rather than relative, URI. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^(([^:/?#]+):)(//([^/?#]*))([^?#]*)(\?([^#]*))?(#(.*))?` +type AbsoluteURI string + +// The CORSOrigin MUST NOT be a relative URI, and it MUST follow the URI syntax and +// encoding rules specified in RFC3986. The CORSOrigin MUST include both a +// scheme (e.g., "http" or "spiffe") and a scheme-specific-part, or it MUST be a single '*' character. +// URIs that include an authority MUST include a fully qualified domain name or +// IP address as the host. +// The regex below asserts the form scheme://host, with the port being optional. +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`(^\*$)|(^([a-zA-Z][a-zA-Z0-9+\-.]+):\/\/([^:/?#]+)(:([0-9]{1,5}))?$)` +type CORSOrigin string + // Group refers to a Kubernetes Group. It must either be an empty string or a // RFC 1123 subdomain. // @@ -662,11 +782,11 @@ type GatewayController string // Invalid values include: // // * example~ - "~" is an invalid character -// * example.com. - can not start or end with "." +// * example.com. - cannot start or end with "." // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=253 -// +kubebuilder:validation:Pattern=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]/?)*$` +// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$` type AnnotationKey string // AnnotationValue is the value of an annotation in Gateway API. This is used @@ -678,6 +798,45 @@ type AnnotationKey string // +kubebuilder:validation:MaxLength=4096 type AnnotationValue string
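A quick way to sanity-check the CORSOrigin pattern vendored above (test inputs illustrative only):

```go
package main

import (
	"fmt"
	"regexp"
)

// corsOrigin is the CORSOrigin kubebuilder pattern from the vendored type.
var corsOrigin = regexp.MustCompile(`(^\*$)|(^([a-zA-Z][a-zA-Z0-9+\-.]+):\/\/([^:/?#]+)(:([0-9]{1,5}))?$)`)

func main() {
	for _, s := range []string{
		"*",                        // wildcard: allowed
		"https://example.com",      // scheme://host: allowed
		"https://example.com:8443", // optional port: allowed
		"example.com",              // no scheme: rejected
		"https://example.com/path", // path component: rejected
	} {
		fmt.Printf("%-28q %v\n", s, corsOrigin.MatchString(s))
	}
}
```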
+// LabelKey is the key of a label in the Gateway API. This is used for validation +// of maps such as Gateway infrastructure labels. This matches the Kubernetes +// "qualified name" validation that is used for labels. +// +// Valid values include: +// +// * example +// * example.com +// * example.com/path +// * example.com/path.html +// +// Invalid values include: +// +// * example~ - "~" is an invalid character +// * example.com. - cannot start or end with "." +// +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=253 +// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]{0,61})?[A-Za-z0-9]$` +type LabelKey string + +// LabelValue is the value of a label in the Gateway API. This is used for validation +// of maps such as Gateway infrastructure labels. This matches the Kubernetes +// label validation rules: +// * must be 63 characters or less (can be empty), +// * unless empty, must begin and end with an alphanumeric character ([a-z0-9A-Z]), +// * may contain dashes (-), underscores (_), dots (.), and alphanumerics in between. +// +// Valid values include: +// +// * MyValue +// * my.name +// * 123-my-value +// +// +kubebuilder:validation:MinLength=0 +// +kubebuilder:validation:MaxLength=63 +// +kubebuilder:validation:Pattern=`^(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?$` +type LabelValue string + // AddressType defines how a network address is represented as a text string. // This may take two possible forms: // @@ -719,7 +878,7 @@ const ( // (see [RFC 5952](https://tools.ietf.org/html/rfc5952)). // // This type is intended for specific addresses. Address ranges are not - // supported (e.g. you can not use a CIDR range like 127.0.0.0/24 as an + // supported (e.g. you cannot use a CIDR range like 127.0.0.0/24 as an // IPAddress). // // Support: Extended @@ -745,7 +904,7 @@ ) // SessionPersistence defines the desired state of SessionPersistence. -// +kubebuilder:validation:XValidation:message="AbsoluteTimeout must be specified when cookie lifetimeType is Permanent",rule="!has(self.cookieConfig.lifetimeType) || self.cookieConfig.lifetimeType != 'Permanent' || has(self.absoluteTimeout)" +// +kubebuilder:validation:XValidation:message="AbsoluteTimeout must be specified when cookie lifetimeType is Permanent",rule="!has(self.cookieConfig) || !has(self.cookieConfig.lifetimeType) || self.cookieConfig.lifetimeType != 'Permanent' || has(self.absoluteTimeout)" type SessionPersistence struct { // SessionName defines the name of the persistent session token // which may be reflected in the cookie or the header. Users @@ -830,6 +989,8 @@ type CookieConfig struct { // absolute lifetime of the cookie tracked by the gateway and // is optional. // + // Defaults to "Session". + // // Support: Core for "Session" type // // Support: Extended for "Permanent" type @@ -855,3 +1016,15 @@ const ( // Support: Extended PermanentCookieLifetimeType CookieLifetimeType = "Permanent" ) + +// +kubebuilder:validation:XValidation:message="numerator must be less than or equal to denominator",rule="self.numerator <= self.denominator" +type Fraction struct { + // +kubebuilder:validation:Minimum=0 + // +required + Numerator int32 `json:"numerator"` + + // +optional + // +kubebuilder:default=100 + // +kubebuilder:validation:Minimum=1 + Denominator *int32 `json:"denominator,omitempty"` +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go index ddb9bb9d4..174c29270 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.deepcopy.go @@ -25,6 +25,26 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
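For reviewers, the defaulting interplay in Fraction: a nil Denominator means 100 after admission, so Numerator then behaves as a percentage. A sketch:

```go
package example

import gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"

// ratio resolves a Fraction to a float, applying the kubebuilder default
// (denominator 100) that the API server would have applied on admission.
func ratio(f gatewayv1.Fraction) float64 {
	den := int32(100)
	if f.Denominator != nil {
		den = *f.Denominator
	}
	return float64(f.Numerator) / float64(den)
}
```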
+func (in *AllowedListeners) DeepCopyInto(out *AllowedListeners) { + *out = *in + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = new(ListenerNamespaces) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedListeners. +func (in *AllowedListeners) DeepCopy() *AllowedListeners { + if in == nil { + return nil + } + out := new(AllowedListeners) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllowedRoutes) DeepCopyInto(out *AllowedRoutes) { *out = *in @@ -108,6 +128,125 @@ func (in *BackendRef) DeepCopy() *BackendRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicy) DeepCopyInto(out *BackendTLSPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicy. +func (in *BackendTLSPolicy) DeepCopy() *BackendTLSPolicy { + if in == nil { + return nil + } + out := new(BackendTLSPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackendTLSPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicyList) DeepCopyInto(out *BackendTLSPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackendTLSPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicyList. +func (in *BackendTLSPolicyList) DeepCopy() *BackendTLSPolicyList { + if in == nil { + return nil + } + out := new(BackendTLSPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackendTLSPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicySpec) DeepCopyInto(out *BackendTLSPolicySpec) { + *out = *in + if in.TargetRefs != nil { + in, out := &in.TargetRefs, &out.TargetRefs + *out = make([]LocalPolicyTargetReferenceWithSectionName, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Validation.DeepCopyInto(&out.Validation) + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicySpec. 
+func (in *BackendTLSPolicySpec) DeepCopy() *BackendTLSPolicySpec { + if in == nil { + return nil + } + out := new(BackendTLSPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendTLSPolicyValidation) DeepCopyInto(out *BackendTLSPolicyValidation) { + *out = *in + if in.CACertificateRefs != nil { + in, out := &in.CACertificateRefs, &out.CACertificateRefs + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.WellKnownCACertificates != nil { + in, out := &in.WellKnownCACertificates, &out.WellKnownCACertificates + *out = new(WellKnownCACertificatesType) + **out = **in + } + if in.SubjectAltNames != nil { + in, out := &in.SubjectAltNames, &out.SubjectAltNames + *out = make([]SubjectAltName, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendTLSPolicyValidation. +func (in *BackendTLSPolicyValidation) DeepCopy() *BackendTLSPolicyValidation { + if in == nil { + return nil + } + out := new(BackendTLSPolicyValidation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CommonRouteSpec) DeepCopyInto(out *CommonRouteSpec) { *out = *in @@ -150,6 +289,64 @@ func (in *CookieConfig) DeepCopy() *CookieConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardBodyConfig) DeepCopyInto(out *ForwardBodyConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardBodyConfig. +func (in *ForwardBodyConfig) DeepCopy() *ForwardBodyConfig { + if in == nil { + return nil + } + out := new(ForwardBodyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Fraction) DeepCopyInto(out *Fraction) { + *out = *in + if in.Denominator != nil { + in, out := &in.Denominator, &out.Denominator + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fraction. +func (in *Fraction) DeepCopy() *Fraction { + if in == nil { + return nil + } + out := new(Fraction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FrontendTLSConfig) DeepCopyInto(out *FrontendTLSConfig) { + *out = *in + in.Default.DeepCopyInto(&out.Default) + if in.PerPort != nil { + in, out := &in.PerPort, &out.PerPort + *out = make([]TLSPortConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendTLSConfig. +func (in *FrontendTLSConfig) DeepCopy() *FrontendTLSConfig { + if in == nil { + return nil + } + out := new(FrontendTLSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FrontendTLSValidation) DeepCopyInto(out *FrontendTLSValidation) { *out = *in @@ -172,6 +369,26 @@ func (in *FrontendTLSValidation) DeepCopy() *FrontendTLSValidation { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GRPCAuthConfig) DeepCopyInto(out *GRPCAuthConfig) { + *out = *in + if in.AllowedRequestHeaders != nil { + in, out := &in.AllowedRequestHeaders, &out.AllowedRequestHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCAuthConfig. +func (in *GRPCAuthConfig) DeepCopy() *GRPCAuthConfig { + if in == nil { + return nil + } + out := new(GRPCAuthConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GRPCBackendRef) DeepCopyInto(out *GRPCBackendRef) { *out = *in @@ -200,7 +417,7 @@ func (in *GRPCHeaderMatch) DeepCopyInto(out *GRPCHeaderMatch) { *out = *in if in.Type != nil { in, out := &in.Type, &out.Type - *out = new(HeaderMatchType) + *out = new(GRPCHeaderMatchType) **out = **in } } @@ -369,6 +586,11 @@ func (in *GRPCRouteMatch) DeepCopy() *GRPCRouteMatch { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GRPCRouteRule) DeepCopyInto(out *GRPCRouteRule) { *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(SectionName) + **out = **in + } if in.Matches != nil { in, out := &in.Matches, &out.Matches *out = make([]GRPCRouteMatch, len(*in)) @@ -479,21 +701,21 @@ func (in *Gateway) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GatewayAddress) DeepCopyInto(out *GatewayAddress) { +func (in *GatewayBackendTLS) DeepCopyInto(out *GatewayBackendTLS) { *out = *in - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(AddressType) - **out = **in + if in.ClientCertificateRef != nil { + in, out := &in.ClientCertificateRef, &out.ClientCertificateRef + *out = new(SecretObjectReference) + (*in).DeepCopyInto(*out) } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayAddress. -func (in *GatewayAddress) DeepCopy() *GatewayAddress { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayBackendTLS. 
+func (in *GatewayBackendTLS) DeepCopy() *GatewayBackendTLS { if in == nil { return nil } - out := new(GatewayAddress) + out := new(GatewayBackendTLS) in.DeepCopyInto(out) return out } @@ -614,7 +836,7 @@ func (in *GatewayInfrastructure) DeepCopyInto(out *GatewayInfrastructure) { *out = *in if in.Labels != nil { in, out := &in.Labels, &out.Labels - *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + *out = make(map[LabelKey]LabelValue, len(*in)) for key, val := range *in { (*out)[key] = val } @@ -687,7 +909,7 @@ func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]GatewayAddress, len(*in)) + *out = make([]GatewaySpecAddress, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -697,6 +919,16 @@ func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { *out = new(GatewayInfrastructure) (*in).DeepCopyInto(*out) } + if in.AllowedListeners != nil { + in, out := &in.AllowedListeners, &out.AllowedListeners + *out = new(AllowedListeners) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(GatewayTLSConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. @@ -709,6 +941,26 @@ func (in *GatewaySpec) DeepCopy() *GatewaySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpecAddress) DeepCopyInto(out *GatewaySpecAddress) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(AddressType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpecAddress. +func (in *GatewaySpecAddress) DeepCopy() *GatewaySpecAddress { + if in == nil { + return nil + } + out := new(GatewaySpecAddress) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GatewayStatus) DeepCopyInto(out *GatewayStatus) { *out = *in @@ -768,29 +1020,15 @@ func (in *GatewayStatusAddress) DeepCopy() *GatewayStatusAddress { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GatewayTLSConfig) DeepCopyInto(out *GatewayTLSConfig) { *out = *in - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - *out = new(TLSModeType) - **out = **in - } - if in.CertificateRefs != nil { - in, out := &in.CertificateRefs, &out.CertificateRefs - *out = make([]SecretObjectReference, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FrontendValidation != nil { - in, out := &in.FrontendValidation, &out.FrontendValidation - *out = new(FrontendTLSValidation) + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(GatewayBackendTLS) (*in).DeepCopyInto(*out) } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make(map[AnnotationKey]AnnotationValue, len(*in)) - for key, val := range *in { - (*out)[key] = val - } + if in.Frontend != nil { + in, out := &in.Frontend, &out.Frontend + *out = new(FrontendTLSConfig) + (*in).DeepCopyInto(*out) } } @@ -804,6 +1042,31 @@ func (in *GatewayTLSConfig) DeepCopy() *GatewayTLSConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPAuthConfig) DeepCopyInto(out *HTTPAuthConfig) { + *out = *in + if in.AllowedRequestHeaders != nil { + in, out := &in.AllowedRequestHeaders, &out.AllowedRequestHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedResponseHeaders != nil { + in, out := &in.AllowedResponseHeaders, &out.AllowedResponseHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPAuthConfig. +func (in *HTTPAuthConfig) DeepCopy() *HTTPAuthConfig { + if in == nil { + return nil + } + out := new(HTTPAuthConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPBackendRef) DeepCopyInto(out *HTTPBackendRef) { *out = *in @@ -827,6 +1090,77 @@ func (in *HTTPBackendRef) DeepCopy() *HTTPBackendRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPCORSFilter) DeepCopyInto(out *HTTPCORSFilter) { + *out = *in + if in.AllowOrigins != nil { + in, out := &in.AllowOrigins, &out.AllowOrigins + *out = make([]CORSOrigin, len(*in)) + copy(*out, *in) + } + if in.AllowCredentials != nil { + in, out := &in.AllowCredentials, &out.AllowCredentials + *out = new(bool) + **out = **in + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]HTTPMethodWithWildcard, len(*in)) + copy(*out, *in) + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]HTTPHeaderName, len(*in)) + copy(*out, *in) + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]HTTPHeaderName, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCORSFilter. +func (in *HTTPCORSFilter) DeepCopy() *HTTPCORSFilter { + if in == nil { + return nil + } + out := new(HTTPCORSFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPExternalAuthFilter) DeepCopyInto(out *HTTPExternalAuthFilter) { + *out = *in + in.BackendRef.DeepCopyInto(&out.BackendRef) + if in.GRPCAuthConfig != nil { + in, out := &in.GRPCAuthConfig, &out.GRPCAuthConfig + *out = new(GRPCAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.HTTPAuthConfig != nil { + in, out := &in.HTTPAuthConfig, &out.HTTPAuthConfig + *out = new(HTTPAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.ForwardBody != nil { + in, out := &in.ForwardBody, &out.ForwardBody + *out = new(ForwardBodyConfig) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPExternalAuthFilter. +func (in *HTTPExternalAuthFilter) DeepCopy() *HTTPExternalAuthFilter { + if in == nil { + return nil + } + out := new(HTTPExternalAuthFilter) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { *out = *in @@ -966,6 +1300,16 @@ func (in *HTTPQueryParamMatch) DeepCopy() *HTTPQueryParamMatch { func (in *HTTPRequestMirrorFilter) DeepCopyInto(out *HTTPRequestMirrorFilter) { *out = *in in.BackendRef.DeepCopyInto(&out.BackendRef) + if in.Percent != nil { + in, out := &in.Percent, &out.Percent + *out = new(int32) + **out = **in + } + if in.Fraction != nil { + in, out := &in.Fraction, &out.Fraction + *out = new(Fraction) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRequestMirrorFilter. @@ -1073,6 +1417,16 @@ func (in *HTTPRouteFilter) DeepCopyInto(out *HTTPRouteFilter) { *out = new(HTTPURLRewriteFilter) (*in).DeepCopyInto(*out) } + if in.CORS != nil { + in, out := &in.CORS, &out.CORS + *out = new(HTTPCORSFilter) + (*in).DeepCopyInto(*out) + } + if in.ExternalAuth != nil { + in, out := &in.ExternalAuth, &out.ExternalAuth + *out = new(HTTPExternalAuthFilter) + (*in).DeepCopyInto(*out) + } if in.ExtensionRef != nil { in, out := &in.ExtensionRef, &out.ExtensionRef *out = new(LocalObjectReference) @@ -1161,9 +1515,44 @@ func (in *HTTPRouteMatch) DeepCopy() *HTTPRouteMatch { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRouteRetry) DeepCopyInto(out *HTTPRouteRetry) { + *out = *in + if in.Codes != nil { + in, out := &in.Codes, &out.Codes + *out = make([]HTTPRouteRetryStatusCode, len(*in)) + copy(*out, *in) + } + if in.Attempts != nil { + in, out := &in.Attempts, &out.Attempts + *out = new(int) + **out = **in + } + if in.Backoff != nil { + in, out := &in.Backoff, &out.Backoff + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRouteRetry. +func (in *HTTPRouteRetry) DeepCopy() *HTTPRouteRetry { + if in == nil { + return nil + } + out := new(HTTPRouteRetry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPRouteRule) DeepCopyInto(out *HTTPRouteRule) { *out = *in + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(SectionName) + **out = **in + } if in.Matches != nil { in, out := &in.Matches, &out.Matches *out = make([]HTTPRouteMatch, len(*in)) @@ -1190,6 +1579,11 @@ func (in *HTTPRouteRule) DeepCopyInto(out *HTTPRouteRule) { *out = new(HTTPRouteTimeouts) (*in).DeepCopyInto(*out) } + if in.Retry != nil { + in, out := &in.Retry, &out.Retry + *out = new(HTTPRouteRetry) + (*in).DeepCopyInto(*out) + } if in.SessionPersistence != nil { in, out := &in.SessionPersistence, &out.SessionPersistence *out = new(SessionPersistence) @@ -1311,7 +1705,7 @@ func (in *Listener) DeepCopyInto(out *Listener) { } if in.TLS != nil { in, out := &in.TLS, &out.TLS - *out = new(GatewayTLSConfig) + *out = new(ListenerTLSConfig) (*in).DeepCopyInto(*out) } if in.AllowedRoutes != nil { @@ -1331,6 +1725,31 @@ func (in *Listener) DeepCopy() *Listener { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListenerNamespaces) DeepCopyInto(out *ListenerNamespaces) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(FromNamespaces) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerNamespaces. +func (in *ListenerNamespaces) DeepCopy() *ListenerNamespaces { + if in == nil { + return nil + } + out := new(ListenerNamespaces) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ListenerStatus) DeepCopyInto(out *ListenerStatus) { *out = *in @@ -1360,6 +1779,40 @@ func (in *ListenerStatus) DeepCopy() *ListenerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListenerTLSConfig) DeepCopyInto(out *ListenerTLSConfig) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(TLSModeType) + **out = **in + } + if in.CertificateRefs != nil { + in, out := &in.CertificateRefs, &out.CertificateRefs + *out = make([]SecretObjectReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[AnnotationKey]AnnotationValue, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListenerTLSConfig. +func (in *ListenerTLSConfig) DeepCopy() *ListenerTLSConfig { + if in == nil { + return nil + } + out := new(ListenerTLSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { *out = *in @@ -1390,6 +1843,62 @@ func (in *LocalParametersReference) DeepCopy() *LocalParametersReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalPolicyTargetReference) DeepCopyInto(out *LocalPolicyTargetReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalPolicyTargetReference. +func (in *LocalPolicyTargetReference) DeepCopy() *LocalPolicyTargetReference { + if in == nil { + return nil + } + out := new(LocalPolicyTargetReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalPolicyTargetReferenceWithSectionName) DeepCopyInto(out *LocalPolicyTargetReferenceWithSectionName) { + *out = *in + out.LocalPolicyTargetReference = in.LocalPolicyTargetReference + if in.SectionName != nil { + in, out := &in.SectionName, &out.SectionName + *out = new(SectionName) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalPolicyTargetReferenceWithSectionName. 
+func (in *LocalPolicyTargetReferenceWithSectionName) DeepCopy() *LocalPolicyTargetReferenceWithSectionName { + if in == nil { + return nil + } + out := new(LocalPolicyTargetReferenceWithSectionName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespacedPolicyTargetReference) DeepCopyInto(out *NamespacedPolicyTargetReference) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(Namespace) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespacedPolicyTargetReference. +func (in *NamespacedPolicyTargetReference) DeepCopy() *NamespacedPolicyTargetReference { + if in == nil { + return nil + } + out := new(NamespacedPolicyTargetReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { *out = *in @@ -1470,6 +1979,51 @@ func (in *ParentReference) DeepCopy() *ParentReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyAncestorStatus) DeepCopyInto(out *PolicyAncestorStatus) { + *out = *in + in.AncestorRef.DeepCopyInto(&out.AncestorRef) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyAncestorStatus. +func (in *PolicyAncestorStatus) DeepCopy() *PolicyAncestorStatus { + if in == nil { + return nil + } + out := new(PolicyAncestorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) { + *out = *in + if in.Ancestors != nil { + in, out := &in.Ancestors, &out.Ancestors + *out = make([]PolicyAncestorStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus. +func (in *PolicyStatus) DeepCopy() *PolicyStatus { + if in == nil { + return nil + } + out := new(PolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RouteGroupKind) DeepCopyInto(out *RouteGroupKind) { *out = *in @@ -1629,3 +2183,69 @@ func (in *SessionPersistence) DeepCopy() *SessionPersistence { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAltName) DeepCopyInto(out *SubjectAltName) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAltName. +func (in *SubjectAltName) DeepCopy() *SubjectAltName { + if in == nil { + return nil + } + out := new(SubjectAltName) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SupportedFeature) DeepCopyInto(out *SupportedFeature) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupportedFeature. +func (in *SupportedFeature) DeepCopy() *SupportedFeature { + if in == nil { + return nil + } + out := new(SupportedFeature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + if in.Validation != nil { + in, out := &in.Validation, &out.Validation + *out = new(FrontendTLSValidation) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. +func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSPortConfig) DeepCopyInto(out *TLSPortConfig) { + *out = *in + in.TLS.DeepCopyInto(&out.TLS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSPortConfig. +func (in *TLSPortConfig) DeepCopy() *TLSPortConfig { + if in == nil { + return nil + } + out := new(TLSPortConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go index 9c8db216a..1f390588e 100644 --- a/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go +++ b/vendor/sigs.k8s.io/gateway-api/apis/v1/zz_generated.register.go @@ -22,16 +22,16 @@ limitations under the License. package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" ) // GroupName specifies the group name used to register the objects. const GroupName = "gateway.networking.k8s.io" // GroupVersion specifies the group and the version used to register the objects. -var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1"} +var GroupVersion = metav1.GroupVersion{Group: GroupName, Version: "v1"} // SchemeGroupVersion is group version used to register these objects // Deprecated: use GroupVersion instead. @@ -61,6 +61,8 @@ func init() { // Adds the list of known types to Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &BackendTLSPolicy{}, + &BackendTLSPolicyList{}, &GRPCRoute{}, &GRPCRouteList{}, &Gateway{}, @@ -71,6 +73,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &HTTPRouteList{}, ) // AddToGroupVersion allows the serialization of client types like ListOptions. 
- v1.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index d538ac119..3fe528bbf 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -52,8 +52,8 @@ import ( // - bool, for JSON booleans // - float64, for JSON numbers // - string, for JSON strings -// - []interface{}, for JSON arrays -// - map[string]interface{}, for JSON objects +// - []any, for JSON arrays +// - map[string]any, for JSON objects // - nil for JSON null // // To unmarshal a JSON array into a slice, Unmarshal resets the slice length @@ -117,9 +117,6 @@ func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // The input can be assumed to be a valid encoding of // a JSON value. UnmarshalJSON must copy the JSON data // if it wishes to retain the data after returning. -// -// By convention, to approximate the behavior of [Unmarshal] itself, -// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. type Unmarshaler interface { UnmarshalJSON([]byte) error } @@ -132,7 +129,7 @@ type UnmarshalTypeError struct { Type reflect.Type // type of Go value it could not be assigned to Offset int64 // error occurred after reading Offset bytes Struct string // name of the struct type containing the field - Field string // the full path from root node to the field + Field string // the full path from root node to the field, include embedded struct } func (e *UnmarshalTypeError) Error() string { @@ -281,7 +278,11 @@ func (d *decodeState) addErrorContext(err error) error { switch err := err.(type) { case *UnmarshalTypeError: err.Struct = d.errorContext.Struct.Name() - err.Field = strings.Join(d.errorContext.FieldStack, ".") + fieldStack := d.errorContext.FieldStack + if err.Field != "" { + fieldStack = append(fieldStack, err.Field) + } + err.Field = strings.Join(fieldStack, ".") } } return err @@ -492,9 +493,9 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm } // Prevent infinite loop if v is an interface pointing to its own address: - // var v interface{} + // var v any // v = &v - if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem().Equal(v) { v = v.Elem() break } @@ -784,7 +785,10 @@ func (d *decodeState) object(v reflect.Value) error { } subv = v destring = f.quoted - for _, i := range f.index { + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + for i, ind := range f.index { if subv.Kind() == reflect.Pointer { if subv.IsNil() { // If a struct embeds a pointer to an unexported type, @@ -804,13 +808,16 @@ func (d *decodeState) object(v reflect.Value) error { } subv = subv.Elem() } - subv = subv.Field(i) - } - if d.errorContext == nil { - d.errorContext = new(errorContext) + if i < len(f.index)-1 { + d.errorContext.FieldStack = append( + d.errorContext.FieldStack, + subv.Type().Field(ind).Name, + ) + } + subv = subv.Field(ind) } - d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.errorContext.Struct = t + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.appendStrictFieldStackKey(f.name) } else if d.disallowUnknownFields { d.saveStrictError(d.newFieldError(unknownStrictErrType, string(key))) @@ -1118,7 +1125,7 @@ func (d *decodeState) literalStore(item 
[]byte, v reflect.Value, fromQuoted bool // in an empty interface. They are not strictly necessary, // but they avoid the weight of reflection in this common case. -// valueInterface is like value but returns interface{} +// valueInterface is like value but returns any. func (d *decodeState) valueInterface() (val any) { switch d.opcode { default: @@ -1135,7 +1142,7 @@ func (d *decodeState) valueInterface() (val any) { return } -// arrayInterface is like array but returns []interface{}. +// arrayInterface is like array but returns []any. func (d *decodeState) arrayInterface() []any { origStrictFieldStackLen := len(d.strictFieldStack) defer func() { @@ -1170,7 +1177,7 @@ func (d *decodeState) arrayInterface() []any { return v } -// objectInterface is like object but returns map[string]interface{}. +// objectInterface is like object but returns map[string]any. func (d *decodeState) objectInterface() map[string]any { origStrictFieldStackLen := len(d.strictFieldStack) defer func() { diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go index eb73bff58..4e3a1a2f1 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go @@ -71,8 +71,8 @@ import ( // // The "omitempty" option specifies that the field should be omitted // from the encoding if the field has an empty value, defined as -// false, 0, a nil pointer, a nil interface value, and any empty array, -// slice, map, or string. +// false, 0, a nil pointer, a nil interface value, and any array, +// slice, map, or string of length zero. // // As a special case, if the field tag is "-", the field is always omitted. // Note that a field with name "-" can still be generated using the tag "-,". @@ -98,6 +98,17 @@ import ( // // Field appears in JSON as key "-". // Field int `json:"-,"` // +// The "omitzero" option specifies that the field should be omitted +// from the encoding if the field has a zero value, according to rules: +// +// 1) If the field type has an "IsZero() bool" method, that will be used to +// determine whether the value is zero. +// +// 2) Otherwise, the value is zero if it is the zero value for its type. +// +// If both "omitempty" and "omitzero" are specified, the field will be omitted +// if the value is either empty or zero (or both). +// // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used @@ -690,7 +701,8 @@ FieldLoop: fv = fv.Field(i) } - if f.omitEmpty && isEmptyValue(fv) { + if (f.omitEmpty && isEmptyValue(fv)) || + (f.omitZero && (f.isZero == nil && fv.IsZero() || (f.isZero != nil && f.isZero(fv)))) { continue } e.WriteByte(next) @@ -808,7 +820,7 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { // Here we use a struct to memorize the pointer to the first element of the slice // and its length. 
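The `omitzero` option vendored above mirrors the one the Go standard library added in Go 1.24; a sketch of the observable behavior under the same rules (stdlib shown, not this fork's internal package):

```go
package main

import (
	"encoding/json" // Go 1.24+ implements the same omitzero option
	"fmt"
	"time"
)

type Event struct {
	Name string    `json:"name"`
	At   time.Time `json:"at,omitzero"` // rule 1: time.Time has IsZero() bool
	N    int       `json:"n,omitzero"`  // rule 2: zero value for its type
}

func main() {
	b, _ := json.Marshal(Event{Name: "boot"})
	fmt.Println(string(b)) // {"name":"boot"}
}
```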
ptr := struct { - ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe + ptr any // always an unsafe.Pointer, but avoids a dependency on package unsafe len int }{v.UnsafePointer(), v.Len()} if _, ok := e.ptrSeen[ptr]; ok { @@ -1039,11 +1051,19 @@ type field struct { index []int typ reflect.Type omitEmpty bool + omitZero bool + isZero func(reflect.Value) bool quoted bool encoder encoderFunc } +type isZeroer interface { + IsZero() bool +} + +var isZeroerType = reflect.TypeFor[isZeroer]() + // typeFields returns a list of fields that JSON should recognize for the given type. // The algorithm is breadth-first search over the set of structs to include - the top struct // and then any reachable anonymous structs. @@ -1135,6 +1155,7 @@ func typeFields(t reflect.Type) structFields { index: index, typ: ft, omitEmpty: opts.Contains("omitempty"), + omitZero: opts.Contains("omitzero"), quoted: quoted, } field.nameBytes = []byte(field.name) @@ -1144,6 +1165,40 @@ func typeFields(t reflect.Type) structFields { field.nameEscHTML = `"` + string(nameEscBuf) + `":` field.nameNonEsc = `"` + field.name + `":` + if field.omitZero { + t := sf.Type + // Provide a function that uses a type's IsZero method. + switch { + case t.Kind() == reflect.Interface && t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + // Avoid panics calling IsZero on a nil interface or + // non-nil interface with nil pointer. + return v.IsNil() || + (v.Elem().Kind() == reflect.Pointer && v.Elem().IsNil()) || + v.Interface().(isZeroer).IsZero() + } + case t.Kind() == reflect.Pointer && t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + // Avoid panics calling IsZero on nil pointer. + return v.IsNil() || v.Interface().(isZeroer).IsZero() + } + case t.Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + return v.Interface().(isZeroer).IsZero() + } + case reflect.PointerTo(t).Implements(isZeroerType): + field.isZero = func(v reflect.Value) bool { + if !v.CanAddr() { + // Temporarily box v so we can take the address. + v2 := reflect.New(v.Type()).Elem() + v2.Set(v) + v = v2 + } + return v.Addr().Interface().(isZeroer).IsZero() + } + } + } + fields = append(fields, field) if count[f.typ] > 1 { // If there were multiple instances, add a second, diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go index 48fc4d945..cc2108b92 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go @@ -31,8 +31,8 @@ func NewDecoder(r io.Reader) *Decoder { return &Decoder{r: r} } -// UseNumber causes the Decoder to unmarshal a number into an interface{} as a -// [Number] instead of as a float64. +// UseNumber causes the Decoder to unmarshal a number into an +// interface value as a [Number] instead of as a float64. func (dec *Decoder) UseNumber() { dec.d.useNumber = true } // DisallowUnknownFields causes the Decoder to return an error when the destination diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go index 5d3707a5b..c8138a654 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v6/schema/elements.go @@ -18,6 +18,7 @@ package schema import ( "sync" + "sync/atomic" ) // Schema is a list of named types. 
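The elements.go change below swaps the lazily built cache maps for atomic.Pointer values, so lock-free readers never race the sync.Once writer. The pattern in isolation (standalone sketch):

```go
package example

import (
	"sync"
	"sync/atomic"
)

// index lazily builds a lookup map once, then publishes it through an
// atomic.Pointer so concurrent readers never observe a partially built map.
type index struct {
	items []string
	once  sync.Once
	m     atomic.Pointer[map[string]int]
}

func (ix *index) lookup(name string) (int, bool) {
	ix.once.Do(func() {
		m := make(map[string]int, len(ix.items))
		for i, item := range ix.items {
			m[item] = i
		}
		ix.m.Store(&m) // publish only after the map is fully built
	})
	i, ok := (*ix.m.Load())[name]
	return i, ok
}
```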
@@ -28,7 +29,7 @@ type Schema struct { Types []TypeDef `yaml:"types,omitempty"` once sync.Once - m map[string]TypeDef + m atomic.Pointer[map[string]TypeDef] lock sync.Mutex // Cached results of resolving type references to atoms. Only stores @@ -144,26 +145,28 @@ type Map struct { ElementRelationship ElementRelationship `yaml:"elementRelationship,omitempty"` once sync.Once - m map[string]StructField + m atomic.Pointer[map[string]StructField] } // FindField is a convenience function that returns the referenced StructField, // if it exists, or (nil, false) if it doesn't. func (m *Map) FindField(name string) (StructField, bool) { m.once.Do(func() { - m.m = make(map[string]StructField, len(m.Fields)) + mm := make(map[string]StructField, len(m.Fields)) for _, field := range m.Fields { - m.m[field.Name] = field + mm[field.Name] = field } + m.m.Store(&mm) }) - sf, ok := m.m[name] + sf, ok := (*m.m.Load())[name] return sf, ok } -// CopyInto this instance of Map into the other -// If other is nil this method does nothing. -// If other is already initialized, overwrites it with this instance -// Warning: Not thread safe +// CopyInto clones this instance of Map into dst +// +// If dst is nil this method does nothing. +// If dst is already initialized, overwrites it with this instance. +// Warning: Not thread safe. Only use dst after this function returns. func (m *Map) CopyInto(dst *Map) { if dst == nil { return @@ -175,12 +178,13 @@ func (m *Map) CopyInto(dst *Map) { dst.Unions = m.Unions dst.ElementRelationship = m.ElementRelationship - if m.m != nil { + mm := m.m.Load() + if mm != nil { // If cache is non-nil then the once token had been consumed. // Must reset token and use it again to ensure same semantics. dst.once = sync.Once{} dst.once.Do(func() { - dst.m = m.m + dst.m.Store(mm) }) } } @@ -274,12 +278,13 @@ type List struct { // if it exists, or (nil, false) if it doesn't. func (s *Schema) FindNamedType(name string) (TypeDef, bool) { s.once.Do(func() { - s.m = make(map[string]TypeDef, len(s.Types)) + sm := make(map[string]TypeDef, len(s.Types)) for _, t := range s.Types { - s.m[t.Name] = t + sm[t.Name] = t } + s.m.Store(&sm) }) - t, ok := s.m[name] + t, ok := (*s.m.Load())[name] return t, ok } @@ -352,10 +357,11 @@ func (s *Schema) Resolve(tr TypeRef) (Atom, bool) { return result, true } -// Clones this instance of Schema into the other -// If other is nil this method does nothing. -// If other is already initialized, overwrites it with this instance -// Warning: Not thread safe +// CopyInto clones this instance of Schema into dst +// +// If dst is nil this method does nothing. +// If dst is already initialized, overwrites it with this instance. +// Warning: Not thread safe. Only use dst after this function returns. func (s *Schema) CopyInto(dst *Schema) { if dst == nil { return @@ -364,12 +370,13 @@ func (s *Schema) CopyInto(dst *Schema) { // Schema type is considered immutable so sharing references dst.Types = s.Types - if s.m != nil { + sm := s.m.Load() + if sm != nil { // If cache is non-nil then the once token had been consumed. // Must reset token and use it again to ensure same semantics. 
dst.once = sync.Once{} dst.once.Do(func() { - dst.m = s.m + dst.m.Store(sm) }) } } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go index 86de5105d..0db1734f9 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v6/typed/remove.go @@ -58,6 +58,10 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { defer w.allocator.Free(l) // If list is null or empty just return if l == nil || l.Length() == 0 { + // For extraction, return the value as is (nil or empty); the difference between nil and empty matters when extracting. + if w.shouldExtract { + w.out = w.value.Unstructured() + } return nil } @@ -71,6 +75,7 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { } var newItems []interface{} + hadMatches := false iter := l.RangeUsing(w.allocator) defer w.allocator.Free(iter) for iter.Next() { @@ -80,24 +85,40 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { path, _ := fieldpath.MakePath(pe) // save items on the path when we shouldExtract // but ignore them when we are removing (i.e. !w.shouldExtract) - if w.toRemove.Has(path) { - if w.shouldExtract { - newItems = append(newItems, removeItemsWithSchema(item, w.toRemove, w.schema, t.ElementType, w.shouldExtract).Unstructured()) - } else { - continue + isExactPathMatch := w.toRemove.Has(path) + isPrefixMatch := !w.toRemove.WithPrefix(pe).Empty() + if w.shouldExtract { + if isPrefixMatch { + item = removeItemsWithSchema(item, w.toRemove.WithPrefix(pe), w.schema, t.ElementType, w.shouldExtract) + } + if isExactPathMatch || isPrefixMatch { + newItems = append(newItems, item.Unstructured()) } - } - if subset := w.toRemove.WithPrefix(pe); !subset.Empty() { - item = removeItemsWithSchema(item, subset, w.schema, t.ElementType, w.shouldExtract) } else { - // don't save items not on the path when we shouldExtract. - if w.shouldExtract { + if isExactPathMatch { continue } + if isPrefixMatch { + // Remove nested items within this list item, preserving it if it becomes empty. + hadMatches = true + wasMap := item.IsMap() + wasList := item.IsList() + item = removeItemsWithSchema(item, w.toRemove.WithPrefix(pe), w.schema, t.ElementType, w.shouldExtract) + // If item returned null but we're removing items within the structure (not the item itself), + // preserve the empty container structure + if item.IsNull() && !w.shouldExtract { + if wasMap { + item = value.NewValueInterface(map[string]interface{}{}) + } else if wasList { + item = value.NewValueInterface([]interface{}{}) + } + } + } + newItems = append(newItems, item.Unstructured()) } - newItems = append(newItems, item.Unstructured()) } - if len(newItems) > 0 { + // Preserve empty lists (non-nil) instead of converting to null when items were matched and removed + if len(newItems) > 0 || (hadMatches && !w.shouldExtract) { w.out = newItems } return nil @@ -113,6 +134,10 @@ func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { } // If map is null or empty just return if m == nil || m.Empty() { + // For extraction, return the value as is (nil or empty); the difference between nil and empty matters when extracting. 
+ if w.shouldExtract { + w.out = w.value.Unstructured() + } return nil } @@ -131,6 +156,7 @@ func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { } newMap := map[string]interface{}{} + hadMatches := false m.Iterate(func(k string, val value.Value) bool { pe := fieldpath.PathElement{FieldName: &k} path, _ := fieldpath.MakePath(pe) @@ -148,7 +174,19 @@ func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { return true } if subset := w.toRemove.WithPrefix(pe); !subset.Empty() { + hadMatches = true + wasMap := val.IsMap() + wasList := val.IsList() val = removeItemsWithSchema(val, subset, w.schema, fieldType, w.shouldExtract) + // If val returned null but we're removing items within the structure (not the field itself), + // preserve the empty container structure + if val.IsNull() && !w.shouldExtract { + if wasMap { + val = value.NewValueInterface(map[string]interface{}{}) + } else if wasList { + val = value.NewValueInterface([]interface{}{}) + } + } } else { // don't save values not on the path when we shouldExtract. if w.shouldExtract { @@ -158,7 +196,8 @@ func (w *removingWalker) doMap(t *schema.Map) ValidationErrors { newMap[k] = val.Unstructured() return true }) - if len(newMap) > 0 { + // Preserve empty maps (non-nil) instead of converting to null when items were matched and removed + if len(newMap) > 0 || (hadMatches && !w.shouldExtract) { w.out = newMap } return nil
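For context on the typed/remove.go hunks above: the rewritten walker now separates an exact path match (the list item or field itself is targeted) from a prefix match (only something nested inside it is targeted), and when pruning hollows out a container it keeps an empty map or list of the original kind rather than letting the value collapse to null. A loose, self-contained sketch of that preservation rule; the flat map[string]interface{} model and all names here are illustrative, not the structured-merge-diff API:

package main

import "fmt"

// prune deletes the keys named in drop anywhere in the tree. When pruning
// empties a map or list, an empty container of the same kind is kept in
// place of null, mirroring the rule the diff adds.
func prune(v interface{}, drop map[string]bool) interface{} {
	switch t := v.(type) {
	case map[string]interface{}:
		out := map[string]interface{}{}
		for k, val := range t {
			if drop[k] {
				continue
			}
			out[k] = prune(val, drop)
		}
		return out // an emptied map stays map[], never nil
	case []interface{}:
		out := []interface{}{}
		for _, item := range t {
			out = append(out, prune(item, drop))
		}
		return out // an emptied list stays [], never nil
	default:
		return v
	}
}

func main() {
	doc := map[string]interface{}{
		"spec": map[string]interface{}{"replicas": 3},
	}
	fmt.Println(prune(doc, map[string]bool{"replicas": true}))
	// map[spec:map[]] (spec survives as an empty map instead of null)
}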
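For context on the schema/elements.go hunks above: moving the lazily built lookup caches behind atomic.Pointer means each map is published with a single atomic store after it is fully populated, so a concurrent reader (for example via the cache sharing in CopyInto) observes either nil or the finished map, never a map mid-construction. A minimal sketch of the same once-plus-atomic pattern, with illustrative names; it assumes Go 1.19+ for the generic atomic.Pointer:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// index lazily builds a name->position cache the way the patched Schema and
// Map types do: sync.Once guards the single build, atomic.Pointer publishes it.
type index struct {
	items []string
	once  sync.Once
	m     atomic.Pointer[map[string]int]
}

func (ix *index) find(name string) (int, bool) {
	ix.once.Do(func() {
		m := make(map[string]int, len(ix.items))
		for i, it := range ix.items {
			m[it] = i
		}
		ix.m.Store(&m) // publish the finished map in one atomic store
	})
	i, ok := (*ix.m.Load())[name]
	return i, ok
}

func main() {
	ix := &index{items: []string{"replicas", "selector"}}
	fmt.Println(ix.find("selector")) // 1 true
}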
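For context on the internal/golang/encoding/json hunks above: the vendored encoder gains the upstream omitzero option, which drops a field whose value is the zero value and prefers the type's own IsZero() bool method when one exists; the four reflect cases guard against calling IsZero through a nil interface, a nil pointer, or an unaddressable value. A minimal sketch of the observable behavior, assuming a Go 1.24+ toolchain whose standard encoding/json carries the same semantics this fork tracks:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type Event struct {
	Name string `json:"name"`
	// time.Time is a struct, so omitempty could never drop it;
	// omitzero consults its IsZero method instead.
	At time.Time `json:"at,omitzero"`
}

func main() {
	out, err := json.Marshal(Event{Name: "boot"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"name":"boot"}
}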