diff --git a/go.mod b/go.mod
index c455bc4a3..5045b68ae 100644
--- a/go.mod
+++ b/go.mod
@@ -63,7 +63,7 @@ require (
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/Azure/msi-dataplane v0.4.2 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.4.0 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1 // indirect
github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
@@ -160,13 +160,13 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/crypto v0.35.0 // indirect
- golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
+ golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 // indirect
golang.org/x/oauth2 v0.24.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/term v0.29.0 // indirect
golang.org/x/text v0.22.0 // indirect
golang.org/x/time v0.10.0 // indirect
- golang.org/x/tools v0.28.0 // indirect
+ golang.org/x/tools v0.30.0 // indirect
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
diff --git a/go.sum b/go.sum
index 731ffd3d7..3504b2a90 100644
--- a/go.sum
+++ b/go.sum
@@ -66,8 +66,8 @@ github.com/Azure/msi-dataplane v0.4.2 h1:4V44wRZ+sKmKgj64SKN5lMskt1qQBQSUiy6kazW
github.com/Azure/msi-dataplane v0.4.2/go.mod h1:yAfxdJyvcnvSDfSyOFV9qm4fReEQDl+nZLGeH2ZWSmw=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.4.0 h1:MUkXAnvvDHgvPItl0nBj0hgk0f7hnnQbGm0h0+YxbN4=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.4.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1 h1:8BKxhZZLX/WosEeoCvWysmKUscfa9v8LIPEEU0JjE2o=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI=
github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
@@ -370,8 +370,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
+golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 h1:aWwlzYV971S4BXRS9AmqwDLAD85ouC6X+pocatKY58c=
+golang.org/x/exp v0.0.0-20250228200357-dead58393ab7/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@@ -435,8 +435,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
-golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
+golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
+golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
index 71275b32f..7bd0a8b3c 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
@@ -68,7 +68,7 @@ type DeviceCodeResponse struct {
UserCode string `json:"user_code"`
DeviceCode string `json:"device_code"`
- VerificationURL string `json:"verification_url"`
+ VerificationURL string `json:"verification_uri"`
ExpiresIn int `json:"expires_in"`
Interval int `json:"interval"`
Message string `json:"message"`
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
deleted file mode 100644
index 2c033dff4..000000000
--- a/vendor/golang.org/x/exp/constraints/constraints.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package constraints defines a set of useful constraints to be used
-// with type parameters.
-package constraints
-
-// Signed is a constraint that permits any signed integer type.
-// If future releases of Go add new predeclared signed integer types,
-// this constraint will be modified to include them.
-type Signed interface {
- ~int | ~int8 | ~int16 | ~int32 | ~int64
-}
-
-// Unsigned is a constraint that permits any unsigned integer type.
-// If future releases of Go add new predeclared unsigned integer types,
-// this constraint will be modified to include them.
-type Unsigned interface {
- ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
-}
-
-// Integer is a constraint that permits any integer type.
-// If future releases of Go add new predeclared integer types,
-// this constraint will be modified to include them.
-type Integer interface {
- Signed | Unsigned
-}
-
-// Float is a constraint that permits any floating-point type.
-// If future releases of Go add new predeclared floating-point types,
-// this constraint will be modified to include them.
-type Float interface {
- ~float32 | ~float64
-}
-
-// Complex is a constraint that permits any complex numeric type.
-// If future releases of Go add new predeclared complex numeric types,
-// this constraint will be modified to include them.
-type Complex interface {
- ~complex64 | ~complex128
-}
-
-// Ordered is a constraint that permits any ordered type: any type
-// that supports the operators < <= >= >.
-// If future releases of Go add new ordered types,
-// this constraint will be modified to include them.
-type Ordered interface {
- Integer | Float | ~string
-}
diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go
deleted file mode 100644
index fbf1934a0..000000000
--- a/vendor/golang.org/x/exp/slices/cmp.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// min is a version of the predeclared function from the Go 1.21 release.
-func min[T constraints.Ordered](a, b T) T {
- if a < b || isNaN(a) {
- return a
- }
- return b
-}
-
-// max is a version of the predeclared function from the Go 1.21 release.
-func max[T constraints.Ordered](a, b T) T {
- if a > b || isNaN(a) {
- return a
- }
- return b
-}
-
-// cmpLess is a copy of cmp.Less from the Go 1.21 release.
-func cmpLess[T constraints.Ordered](x, y T) bool {
- return (isNaN(x) && !isNaN(y)) || x < y
-}
-
-// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
-func cmpCompare[T constraints.Ordered](x, y T) int {
- xNaN := isNaN(x)
- yNaN := isNaN(y)
- if xNaN && yNaN {
- return 0
- }
- if xNaN || x < y {
- return -1
- }
- if yNaN || x > y {
- return +1
- }
- return 0
-}
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
index 46ceac343..da0df370d 100644
--- a/vendor/golang.org/x/exp/slices/slices.go
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -6,9 +6,8 @@
package slices
import (
- "unsafe"
-
- "golang.org/x/exp/constraints"
+ "cmp"
+ "slices"
)
// Equal reports whether two slices are equal: the same length and all
@@ -16,16 +15,10 @@ import (
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
+//
+//go:fix inline
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i := range s1 {
- if s1[i] != s2[i] {
- return false
- }
- }
- return true
+ return slices.Equal(s1, s2)
}
// EqualFunc reports whether two slices are equal using an equality
@@ -33,17 +26,10 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool {
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
+//
+//go:fix inline
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i, v1 := range s1 {
- v2 := s2[i]
- if !eq(v1, v2) {
- return false
- }
- }
- return true
+ return slices.EqualFunc(s1, s2, eq)
}
// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
@@ -53,20 +39,10 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
-func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
- for i, v1 := range s1 {
- if i >= len(s2) {
- return +1
- }
- v2 := s2[i]
- if c := cmpCompare(v1, v2); c != 0 {
- return c
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- return 0
+//
+//go:fix inline
+func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int {
+ return slices.Compare(s1, s2)
}
// CompareFunc is like [Compare] but uses a custom comparison function on each
@@ -74,53 +50,41 @@ func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
+//
+//go:fix inline
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
- for i, v1 := range s1 {
- if i >= len(s2) {
- return +1
- }
- v2 := s2[i]
- if c := cmp(v1, v2); c != 0 {
- return c
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- return 0
+ return slices.CompareFunc(s1, s2, cmp)
}
// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
+//
+//go:fix inline
func Index[S ~[]E, E comparable](s S, v E) int {
- for i := range s {
- if v == s[i] {
- return i
- }
- }
- return -1
+ return slices.Index(s, v)
}
// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
+//
+//go:fix inline
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
- for i := range s {
- if f(s[i]) {
- return i
- }
- }
- return -1
+ return slices.IndexFunc(s, f)
}
// Contains reports whether v is present in s.
+//
+//go:fix inline
func Contains[S ~[]E, E comparable](s S, v E) bool {
- return Index(s, v) >= 0
+ return slices.Contains(s, v)
}
// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
+//
+//go:fix inline
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
- return IndexFunc(s, f) >= 0
+ return slices.ContainsFunc(s, f)
}
// Insert inserts the values v... into s at index i,
@@ -130,93 +94,10 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
// and r[i+len(v)] == value originally at r[i].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
+//
+//go:fix inline
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
- m := len(v)
- if m == 0 {
- return s
- }
- n := len(s)
- if i == n {
- return append(s, v...)
- }
- if n+m > cap(s) {
- // Use append rather than make so that we bump the size of
- // the slice up to the next storage class.
- // This is what Grow does but we don't call Grow because
- // that might copy the values twice.
- s2 := append(s[:i], make(S, n+m-i)...)
- copy(s2[i:], v)
- copy(s2[i+m:], s[i:])
- return s2
- }
- s = s[:n+m]
-
- // before:
- // s: aaaaaaaabbbbccccccccdddd
- // ^ ^ ^ ^
- // i i+m n n+m
- // after:
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- //
- // a are the values that don't move in s.
- // v are the values copied in from v.
- // b and c are the values from s that are shifted up in index.
- // d are the values that get overwritten, never to be seen again.
-
- if !overlaps(v, s[i+m:]) {
- // Easy case - v does not overlap either the c or d regions.
- // (It might be in some of a or b, or elsewhere entirely.)
- // The data we copy up doesn't write to v at all, so just do it.
-
- copy(s[i+m:], s[i:])
-
- // Now we have
- // s: aaaaaaaabbbbbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // Note the b values are duplicated.
-
- copy(s[i:], v)
-
- // Now we have
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // That's the result we want.
- return s
- }
-
- // The hard case - v overlaps c or d. We can't just shift up
- // the data because we'd move or clobber the values we're trying
- // to insert.
- // So instead, write v on top of d, then rotate.
- copy(s[n:], v)
-
- // Now we have
- // s: aaaaaaaabbbbccccccccvvvv
- // ^ ^ ^ ^
- // i i+m n n+m
-
- rotateRight(s[i:], m)
-
- // Now we have
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // That's the result we want.
- return s
-}
-
-// clearSlice sets all elements up to the length of s to the zero value of E.
-// We may use the builtin clear func instead, and remove clearSlice, when upgrading
-// to Go 1.21+.
-func clearSlice[S ~[]E, E any](s S) {
- var zero E
- for i := range s {
- s[i] = zero
- }
+ return slices.Insert(s, i, v...)
}
// Delete removes the elements s[i:j] from s, returning the modified slice.
@@ -224,136 +105,36 @@ func clearSlice[S ~[]E, E any](s S) {
// Delete is O(len(s)-i), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete zeroes the elements s[len(s)-(j-i):len(s)].
+//
+//go:fix inline
func Delete[S ~[]E, E any](s S, i, j int) S {
- _ = s[i:j:len(s)] // bounds check
-
- if i == j {
- return s
- }
-
- oldlen := len(s)
- s = append(s[:i], s[j:]...)
- clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
- return s
+ return slices.Delete(s, i, j)
}
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// DeleteFunc zeroes the elements between the new length and the original length.
+//
+//go:fix inline
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
- i := IndexFunc(s, del)
- if i == -1 {
- return s
- }
- // Don't start copying elements until we find one to delete.
- for j := i + 1; j < len(s); j++ {
- if v := s[j]; !del(v) {
- s[i] = v
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
+ return slices.DeleteFunc(s, del)
}
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
+//
+//go:fix inline
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
- _ = s[i:j] // verify that i:j is a valid subslice
-
- if i == j {
- return Insert(s, i, v...)
- }
- if j == len(s) {
- return append(s[:i], v...)
- }
-
- tot := len(s[:i]) + len(v) + len(s[j:])
- if tot > cap(s) {
- // Too big to fit, allocate and copy over.
- s2 := append(s[:i], make(S, tot-i)...) // See Insert
- copy(s2[i:], v)
- copy(s2[i+len(v):], s[j:])
- return s2
- }
-
- r := s[:tot]
-
- if i+len(v) <= j {
- // Easy, as v fits in the deleted portion.
- copy(r[i:], v)
- if i+len(v) != j {
- copy(r[i+len(v):], s[j:])
- }
- clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
- return r
- }
-
- // We are expanding (v is bigger than j-i).
- // The situation is something like this:
- // (example has i=4,j=8,len(s)=16,len(v)=6)
- // s: aaaaxxxxbbbbbbbbyy
- // ^ ^ ^ ^
- // i j len(s) tot
- // a: prefix of s
- // x: deleted range
- // b: more of s
- // y: area to expand into
-
- if !overlaps(r[i+len(v):], v) {
- // Easy, as v is not clobbered by the first copy.
- copy(r[i+len(v):], s[j:])
- copy(r[i:], v)
- return r
- }
-
- // This is a situation where we don't have a single place to which
- // we can copy v. Parts of it need to go to two different places.
- // We want to copy the prefix of v into y and the suffix into x, then
- // rotate |y| spots to the right.
- //
- // v[2:] v[:2]
- // | |
- // s: aaaavvvvbbbbbbbbvv
- // ^ ^ ^ ^
- // i j len(s) tot
- //
- // If either of those two destinations don't alias v, then we're good.
- y := len(v) - (j - i) // length of y portion
-
- if !overlaps(r[i:j], v) {
- copy(r[i:j], v[y:])
- copy(r[len(s):], v[:y])
- rotateRight(r[i:], y)
- return r
- }
- if !overlaps(r[len(s):], v) {
- copy(r[len(s):], v[:y])
- copy(r[i:j], v[y:])
- rotateRight(r[i:], y)
- return r
- }
-
- // Now we know that v overlaps both x and y.
- // That means that the entirety of b is *inside* v.
- // So we don't need to preserve b at all; instead we
- // can copy v first, then copy the b part of v out of
- // v to the right destination.
- k := startIdx(v, s[j:])
- copy(r[i:], v)
- copy(r[i+len(v):], r[i+k:])
- return r
+ return slices.Replace(s, i, j, v...)
}
// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
+//
+//go:fix inline
func Clone[S ~[]E, E any](s S) S {
- // Preserve nil in case it matters.
- if s == nil {
- return nil
- }
- return append(S([]E{}), s...)
+ return slices.Clone(s)
}
// Compact replaces consecutive runs of equal elements with a single copy.
@@ -361,155 +142,41 @@ func Clone[S ~[]E, E any](s S) S {
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
// Compact zeroes the elements between the new length and the original length.
+//
+//go:fix inline
func Compact[S ~[]E, E comparable](s S) S {
- if len(s) < 2 {
- return s
- }
- i := 1
- for k := 1; k < len(s); k++ {
- if s[k] != s[k-1] {
- if i != k {
- s[i] = s[k]
- }
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
+ return slices.Compact(s)
}
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
// CompactFunc zeroes the elements between the new length and the original length.
+//
+//go:fix inline
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
- if len(s) < 2 {
- return s
- }
- i := 1
- for k := 1; k < len(s); k++ {
- if !eq(s[k], s[k-1]) {
- if i != k {
- s[i] = s[k]
- }
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
+ return slices.CompactFunc(s, eq)
}
// Grow increases the slice's capacity, if necessary, to guarantee space for
// another n elements. After Grow(n), at least n elements can be appended
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
+//
+//go:fix inline
func Grow[S ~[]E, E any](s S, n int) S {
- if n < 0 {
- panic("cannot be negative")
- }
- if n -= cap(s) - len(s); n > 0 {
- // TODO(https://go.dev/issue/53888): Make using []E instead of S
- // to workaround a compiler bug where the runtime.growslice optimization
- // does not take effect. Revert when the compiler is fixed.
- s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
- }
- return s
+ return slices.Grow(s, n)
}
// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
-func Clip[S ~[]E, E any](s S) S {
- return s[:len(s):len(s)]
-}
-
-// Rotation algorithm explanation:
-//
-// rotate left by 2
-// start with
-// 0123456789
-// split up like this
-// 01 234567 89
-// swap first 2 and last 2
-// 89 234567 01
-// join first parts
-// 89234567 01
-// recursively rotate first left part by 2
-// 23456789 01
-// join at the end
-// 2345678901
//
-// rotate left by 8
-// start with
-// 0123456789
-// split up like this
-// 01 234567 89
-// swap first 2 and last 2
-// 89 234567 01
-// join last parts
-// 89 23456701
-// recursively rotate second part left by 6
-// 89 01234567
-// join at the end
-// 8901234567
-
-// TODO: There are other rotate algorithms.
-// This algorithm has the desirable property that it moves each element exactly twice.
-// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
-// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
-
-// rotateLeft rotates b left by n spaces.
-// s_final[i] = s_orig[i+r], wrapping around.
-func rotateLeft[E any](s []E, r int) {
- for r != 0 && r != len(s) {
- if r*2 <= len(s) {
- swap(s[:r], s[len(s)-r:])
- s = s[:len(s)-r]
- } else {
- swap(s[:len(s)-r], s[r:])
- s, r = s[len(s)-r:], r*2-len(s)
- }
- }
-}
-func rotateRight[E any](s []E, r int) {
- rotateLeft(s, len(s)-r)
-}
-
-// swap swaps the contents of x and y. x and y must be equal length and disjoint.
-func swap[E any](x, y []E) {
- for i := 0; i < len(x); i++ {
- x[i], y[i] = y[i], x[i]
- }
-}
-
-// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
-func overlaps[E any](a, b []E) bool {
- if len(a) == 0 || len(b) == 0 {
- return false
- }
- elemSize := unsafe.Sizeof(a[0])
- if elemSize == 0 {
- return false
- }
- // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
- // Also see crypto/internal/alias/alias.go:AnyOverlap
- return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
- uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
-}
-
-// startIdx returns the index in haystack where the needle starts.
-// prerequisite: the needle must be aliased entirely inside the haystack.
-func startIdx[E any](haystack, needle []E) int {
- p := &needle[0]
- for i := range haystack {
- if p == &haystack[i] {
- return i
- }
- }
- // TODO: what if the overlap is by a non-integral number of Es?
- panic("needle not found")
+//go:fix inline
+func Clip[S ~[]E, E any](s S) S {
+ return slices.Clip(s)
}
// Reverse reverses the elements of the slice in place.
+//
+//go:fix inline
func Reverse[S ~[]E, E any](s S) {
- for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
- s[i], s[j] = s[j], s[i]
- }
+ slices.Reverse(s)
}
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
index f58bbc7ba..bd91a8d40 100644
--- a/vendor/golang.org/x/exp/slices/sort.go
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -2,21 +2,19 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
-
package slices
import (
- "math/bits"
-
- "golang.org/x/exp/constraints"
+ "cmp"
+ "slices"
)
// Sort sorts a slice of any ordered type in ascending order.
// When sorting floating-point numbers, NaNs are ordered before other values.
-func Sort[S ~[]E, E constraints.Ordered](x S) {
- n := len(x)
- pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+//
+//go:fix inline
+func Sort[S ~[]E, E cmp.Ordered](x S) {
+ slices.Sort(x)
}
// SortFunc sorts the slice x in ascending order as determined by the cmp
@@ -28,119 +26,79 @@ func Sort[S ~[]E, E constraints.Ordered](x S) {
// SortFunc requires that cmp is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
// To indicate 'uncomparable', return 0 from the function.
+//
+//go:fix inline
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
- n := len(x)
- pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
+ slices.SortFunc(x, cmp)
}
// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using cmp to compare elements in the same way as [SortFunc].
+//
+//go:fix inline
func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
- stableCmpFunc(x, len(x), cmp)
+ slices.SortStableFunc(x, cmp)
}
// IsSorted reports whether x is sorted in ascending order.
-func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
- for i := len(x) - 1; i > 0; i-- {
- if cmpLess(x[i], x[i-1]) {
- return false
- }
- }
- return true
+//
+//go:fix inline
+func IsSorted[S ~[]E, E cmp.Ordered](x S) bool {
+ return slices.IsSorted(x)
}
// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
// comparison function as defined by [SortFunc].
+//
+//go:fix inline
func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
- for i := len(x) - 1; i > 0; i-- {
- if cmp(x[i], x[i-1]) < 0 {
- return false
- }
- }
- return true
+ return slices.IsSortedFunc(x, cmp)
}
// Min returns the minimal value in x. It panics if x is empty.
// For floating-point numbers, Min propagates NaNs (any NaN value in x
// forces the output to be NaN).
-func Min[S ~[]E, E constraints.Ordered](x S) E {
- if len(x) < 1 {
- panic("slices.Min: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- m = min(m, x[i])
- }
- return m
+//
+//go:fix inline
+func Min[S ~[]E, E cmp.Ordered](x S) E {
+ return slices.Min(x)
}
// MinFunc returns the minimal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one minimal element
// according to the cmp function, MinFunc returns the first one.
+//
+//go:fix inline
func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
- if len(x) < 1 {
- panic("slices.MinFunc: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- if cmp(x[i], m) < 0 {
- m = x[i]
- }
- }
- return m
+ return slices.MinFunc(x, cmp)
}
// Max returns the maximal value in x. It panics if x is empty.
// For floating-point E, Max propagates NaNs (any NaN value in x
// forces the output to be NaN).
-func Max[S ~[]E, E constraints.Ordered](x S) E {
- if len(x) < 1 {
- panic("slices.Max: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- m = max(m, x[i])
- }
- return m
+//
+//go:fix inline
+func Max[S ~[]E, E cmp.Ordered](x S) E {
+ return slices.Max(x)
}
// MaxFunc returns the maximal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one maximal element
// according to the cmp function, MaxFunc returns the first one.
+//
+//go:fix inline
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
- if len(x) < 1 {
- panic("slices.MaxFunc: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- if cmp(x[i], m) > 0 {
- m = x[i]
- }
- }
- return m
+ return slices.MaxFunc(x, cmp)
}
// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
-func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
- // Inlining is faster than calling BinarySearchFunc with a lambda.
- n := len(x)
- // Define x[-1] < target and x[n] >= target.
- // Invariant: x[i-1] < target, x[j] >= target.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if cmpLess(x[h], target) {
- i = h + 1 // preserves x[i-1] < target
- } else {
- j = h // preserves x[j] >= target
- }
- }
- // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
- return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
+//
+//go:fix inline
+func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) {
+ return slices.BinarySearch(x, target)
}
// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
@@ -150,48 +108,8 @@ func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
// or a positive number if the slice element follows the target.
// cmp must implement the same ordering as the slice, such that if
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+//
+//go:fix inline
func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
- n := len(x)
- // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
- // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if cmp(x[h], target) < 0 {
- i = h + 1 // preserves cmp(x[i - 1], target) < 0
- } else {
- j = h // preserves cmp(x[j], target) >= 0
- }
- }
- // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
- return i, i < n && cmp(x[i], target) == 0
-}
-
-type sortedHint int // hint for pdqsort when choosing the pivot
-
-const (
- unknownHint sortedHint = iota
- increasingHint
- decreasingHint
-)
-
-// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
-type xorshift uint64
-
-func (r *xorshift) Next() uint64 {
- *r ^= *r << 13
- *r ^= *r >> 17
- *r ^= *r << 5
- return uint64(*r)
-}
-
-func nextPowerOfTwo(length int) uint {
- return 1 << bits.Len(uint(length))
-}
-
-// isNaN reports whether x is a NaN without requiring the math package.
-// This will always return false if T is not floating-point.
-func isNaN[T constraints.Ordered](x T) bool {
- return x != x
+ return slices.BinarySearchFunc(x, target, cmp)
}
diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
deleted file mode 100644
index 06f2c7a24..000000000
--- a/vendor/golang.org/x/exp/slices/zsortanyfunc.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Code generated by gen_sort_variants.go; DO NOT EDIT.
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-// insertionSortCmpFunc sorts data[a:b] using insertion sort.
-func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// siftDownCmpFunc implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
- child++
- }
- if !(cmp(data[first+root], data[first+child]) < 0) {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-
-func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownCmpFunc(data, i, hi, first, cmp)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDownCmpFunc(data, lo, i, first, cmp)
- }
-}
-
-// pdqsortCmpFunc sorts data[a:b].
-// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
-// C++ implementation: https://github.com/orlp/pdqsort
-// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
-// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
- const maxInsertion = 12
-
- var (
- wasBalanced = true // whether the last partitioning was reasonably balanced
- wasPartitioned = true // whether the slice was already partitioned
- )
-
- for {
- length := b - a
-
- if length <= maxInsertion {
- insertionSortCmpFunc(data, a, b, cmp)
- return
- }
-
- // Fall back to heapsort if too many bad choices were made.
- if limit == 0 {
- heapSortCmpFunc(data, a, b, cmp)
- return
- }
-
- // If the last partitioning was imbalanced, we need to breaking patterns.
- if !wasBalanced {
- breakPatternsCmpFunc(data, a, b, cmp)
- limit--
- }
-
- pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
- if hint == decreasingHint {
- reverseRangeCmpFunc(data, a, b, cmp)
- // The chosen pivot was pivot-a elements after the start of the array.
- // After reversing it is pivot-a elements before the end of the array.
- // The idea came from Rust's implementation.
- pivot = (b - 1) - (pivot - a)
- hint = increasingHint
- }
-
- // The slice is likely already sorted.
- if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortCmpFunc(data, a, b, cmp) {
- return
- }
- }
-
- // Probably the slice contains many duplicate elements, partition the slice into
- // elements equal to and elements greater than the pivot.
- if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
- mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
- a = mid
- continue
- }
-
- mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
- wasPartitioned = alreadyPartitioned
-
- leftLen, rightLen := mid-a, b-mid
- balanceThreshold := length / 8
- if leftLen < rightLen {
- wasBalanced = leftLen >= balanceThreshold
- pdqsortCmpFunc(data, a, mid, limit, cmp)
- a = mid + 1
- } else {
- wasBalanced = rightLen >= balanceThreshold
- pdqsortCmpFunc(data, mid+1, b, limit, cmp)
- b = mid
- }
- }
-}
-
-// partitionCmpFunc does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && (cmp(data[i], data[a]) < 0) {
- i++
- }
- for i <= j && !(cmp(data[j], data[a]) < 0) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && (cmp(data[i], data[a]) < 0) {
- i++
- }
- for i <= j && !(cmp(data[j], data[a]) < 0) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !(cmp(data[a], data[i]) < 0) {
- i++
- }
- for i <= j && (cmp(data[a], data[j]) < 0) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !(cmp(data[i], data[i-1]) < 0) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !(cmp(data[j], data[j-1]) < 0) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !(cmp(data[j], data[j-1]) < 0) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotCmpFunc chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
- j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
- k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
- }
- // Find the median among i, j, k and stores it into j.
- j = medianCmpFunc(data, i, j, k, &swaps, cmp)
- }
-
- switch swaps {
- case 0:
- return j, increasingHint
- case maxSwaps:
- return j, decreasingHint
- default:
- return j, unknownHint
- }
-}
-
-// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
-func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
- if cmp(data[b], data[a]) < 0 {
- *swaps++
- return b, a
- }
- return a, b
-}
-
-// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
- a, b = order2CmpFunc(data, a, b, swaps, cmp)
- b, c = order2CmpFunc(data, b, c, swaps, cmp)
- a, b = order2CmpFunc(data, a, b, swaps, cmp)
- return b
-}
-
-// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
-func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
- return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
-}
-
-func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- i := a
- j := b - 1
- for i < j {
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
-}
-
-func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
- for i := 0; i < n; i++ {
- data[a+i], data[b+i] = data[b+i], data[a+i]
- }
-}
-
-func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSortCmpFunc(data, a, b, cmp)
- a = b
- b += blockSize
- }
- insertionSortCmpFunc(data, a, n, cmp)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMergeCmpFunc(data, a, a+blockSize, b, cmp)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMergeCmpFunc(data, a, m, n, cmp)
- }
- blockSize *= 2
- }
-}
-
-// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-n. Wolog M < N.
-// The recursion depth is bound by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if cmp(data[h], data[a]) < 0 {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data[k], data[k+1] = data[k+1], data[k]
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !(cmp(data[m], data[h]) < 0) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data[k], data[k-1] = data[k-1], data[k]
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !(cmp(data[p-c], data[c]) < 0) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotateCmpFunc(data, start, m, end, cmp)
- }
- if a < start && start < mid {
- symMergeCmpFunc(data, a, start, mid, cmp)
- }
- if mid < end && end < b {
- symMergeCmpFunc(data, mid, end, b, cmp)
- }
-}
-
-// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a many calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRangeCmpFunc(data, m-i, m, j, cmp)
- i -= j
- } else {
- swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
- j -= i
- }
- }
- // i == j
- swapRangeCmpFunc(data, m-i, m, i, cmp)
-}
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
deleted file mode 100644
index 99b47c398..000000000
--- a/vendor/golang.org/x/exp/slices/zsortordered.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Code generated by gen_sort_variants.go; DO NOT EDIT.
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// insertionSortOrdered sorts data[a:b] using insertion sort.
-func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// siftDownOrdered implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
- child++
- }
- if !cmpLess(data[first+root], data[first+child]) {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-
-func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownOrdered(data, i, hi, first)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDownOrdered(data, lo, i, first)
- }
-}
-
-// pdqsortOrdered sorts data[a:b].
-// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
-// C++ implementation: https://github.com/orlp/pdqsort
-// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
-// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
- const maxInsertion = 12
-
- var (
- wasBalanced = true // whether the last partitioning was reasonably balanced
- wasPartitioned = true // whether the slice was already partitioned
- )
-
- for {
- length := b - a
-
- if length <= maxInsertion {
- insertionSortOrdered(data, a, b)
- return
- }
-
- // Fall back to heapsort if too many bad choices were made.
- if limit == 0 {
- heapSortOrdered(data, a, b)
- return
- }
-
- // If the last partitioning was imbalanced, we need to breaking patterns.
- if !wasBalanced {
- breakPatternsOrdered(data, a, b)
- limit--
- }
-
- pivot, hint := choosePivotOrdered(data, a, b)
- if hint == decreasingHint {
- reverseRangeOrdered(data, a, b)
- // The chosen pivot was pivot-a elements after the start of the array.
- // After reversing it is pivot-a elements before the end of the array.
- // The idea came from Rust's implementation.
- pivot = (b - 1) - (pivot - a)
- hint = increasingHint
- }
-
- // The slice is likely already sorted.
- if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortOrdered(data, a, b) {
- return
- }
- }
-
- // Probably the slice contains many duplicate elements, partition the slice into
- // elements equal to and elements greater than the pivot.
- if a > 0 && !cmpLess(data[a-1], data[pivot]) {
- mid := partitionEqualOrdered(data, a, b, pivot)
- a = mid
- continue
- }
-
- mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
- wasPartitioned = alreadyPartitioned
-
- leftLen, rightLen := mid-a, b-mid
- balanceThreshold := length / 8
- if leftLen < rightLen {
- wasBalanced = leftLen >= balanceThreshold
- pdqsortOrdered(data, a, mid, limit)
- a = mid + 1
- } else {
- wasBalanced = rightLen >= balanceThreshold
- pdqsortOrdered(data, mid+1, b, limit)
- b = mid
- }
- }
-}
-
-// partitionOrdered does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && cmpLess(data[i], data[a]) {
- i++
- }
- for i <= j && !cmpLess(data[j], data[a]) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && cmpLess(data[i], data[a]) {
- i++
- }
- for i <= j && !cmpLess(data[j], data[a]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !cmpLess(data[a], data[i]) {
- i++
- }
- for i <= j && cmpLess(data[a], data[j]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !cmpLess(data[i], data[i-1]) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !cmpLess(data[j], data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !cmpLess(data[j], data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotOrdered chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentOrdered(data, i, &swaps)
- j = medianAdjacentOrdered(data, j, &swaps)
- k = medianAdjacentOrdered(data, k, &swaps)
- }
- // Find the median among i, j, k and stores it into j.
- j = medianOrdered(data, i, j, k, &swaps)
- }
-
- switch swaps {
- case 0:
- return j, increasingHint
- case maxSwaps:
- return j, decreasingHint
- default:
- return j, unknownHint
- }
-}
-
-// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
-func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
- if cmpLess(data[b], data[a]) {
- *swaps++
- return b, a
- }
- return a, b
-}
-
-// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
- a, b = order2Ordered(data, a, b, swaps)
- b, c = order2Ordered(data, b, c, swaps)
- a, b = order2Ordered(data, a, b, swaps)
- return b
-}
-
-// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
-func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
- return medianOrdered(data, a-1, a, a+1, swaps)
-}
-
-func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
- i := a
- j := b - 1
- for i < j {
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
-}
-
-func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
- for i := 0; i < n; i++ {
- data[a+i], data[b+i] = data[b+i], data[a+i]
- }
-}
-
-func stableOrdered[E constraints.Ordered](data []E, n int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSortOrdered(data, a, b)
- a = b
- b += blockSize
- }
- insertionSortOrdered(data, a, n)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMergeOrdered(data, a, a+blockSize, b)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMergeOrdered(data, a, m, n)
- }
- blockSize *= 2
- }
-}
-
-// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-n. Wolog M < N.
-// The recursion depth is bound by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if cmpLess(data[h], data[a]) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data[k], data[k+1] = data[k+1], data[k]
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !cmpLess(data[m], data[h]) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data[k], data[k-1] = data[k-1], data[k]
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !cmpLess(data[p-c], data[c]) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotateOrdered(data, start, m, end)
- }
- if a < start && start < mid {
- symMergeOrdered(data, a, start, mid)
- }
- if mid < end && end < b {
- symMergeOrdered(data, mid, end, b)
- }
-}
-
-// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a many calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRangeOrdered(data, m-i, m, j)
- i -= j
- } else {
- swapRangeOrdered(data, m-i, m+j-i, i)
- j -= i
- }
- }
- // i == j
- swapRangeOrdered(data, m-i, m, i)
-}
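
For reference, the block-swap rotation that the removed rotateOrdered/swapRangeOrdered helpers implemented can be exercised in isolation. The sketch below mirrors that algorithm; the file name, demo slice, and the relaxation of the element constraint to any (rotation needs no comparisons) are illustrative additions, not part of the vendored code.

// rotate_sketch.go — standalone sketch of the removed block-swap rotation.
package main

import "fmt"

// swapRange swaps data[a:a+n] with data[b:b+n], element by element.
func swapRange[E any](data []E, a, b, n int) {
	for i := 0; i < n; i++ {
		data[a+i], data[b+i] = data[b+i], data[a+i]
	}
}

// rotate turns data[a:b] from "u v" into "v u", where u = data[a:m], v = data[m:b],
// using at most b-a element swaps (the GCD-style scheme of rotateOrdered).
func rotate[E any](data []E, a, m, b int) {
	i := m - a
	j := b - m
	for i != j {
		if i > j {
			swapRange(data, m-i, m, j)
			i -= j
		} else {
			swapRange(data, m-i, m+j-i, i)
			j -= i
		}
	}
	swapRange(data, m-i, m, i) // i == j
}

func main() {
	s := []int{1, 2, 3, 10, 20}
	rotate(s, 0, 3, 5) // u = [1 2 3], v = [10 20]
	fmt.Println(s)     // [10 20 1 2 3]
}
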
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
index 958cf38de..0d5050fe4 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -36,6 +36,9 @@ package inspector
import (
"go/ast"
+ _ "unsafe"
+
+ "golang.org/x/tools/internal/astutil/edge"
)
// An Inspector provides methods for inspecting
@@ -44,6 +47,24 @@ type Inspector struct {
events []event
}
+//go:linkname events
+func events(in *Inspector) []event { return in.events }
+
+func packEdgeKindAndIndex(ek edge.Kind, index int) int32 {
+ return int32(uint32(index+1)<<7 | uint32(ek))
+}
+
+// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within
+// an []ast.Node slice) from the parent field of a pop event.
+//
+//go:linkname unpackEdgeKindAndIndex
+func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) {
+ // The "parent" field of a pop node holds the
+ // edge Kind in the lower 7 bits and the index+1
+ // in the upper 25.
+ return edge.Kind(x & 0x7f), int(x>>7) - 1
+}
+
// New returns an Inspector for the specified syntax trees.
func New(files []*ast.File) *Inspector {
return &Inspector{traverse(files)}
@@ -52,9 +73,10 @@ func New(files []*ast.File) *Inspector {
// An event represents a push or a pop
// of an ast.Node during a traversal.
type event struct {
- node ast.Node
- typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
- index int // index of corresponding push or pop event
+ node ast.Node
+ typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events
+ index int32 // index of corresponding push or pop event
+ parent int32 // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only)
}
// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer).
@@ -83,7 +105,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
// })
mask := maskOf(types)
- for i := 0; i < len(in.events); {
+ for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -113,7 +135,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
// matches an element of the types slice.
func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
mask := maskOf(types)
- for i := 0; i < len(in.events); {
+ for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -147,7 +169,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc
func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
mask := maskOf(types)
var stack []ast.Node
- for i := 0; i < len(in.events); {
+ for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -189,43 +211,74 @@ func traverse(files []*ast.File) []event {
extent += int(f.End() - f.Pos())
}
// This estimate is based on the net/http package.
- capacity := extent * 33 / 100
- if capacity > 1e6 {
- capacity = 1e6 // impose some reasonable maximum
+ capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M)
+
+ v := &visitor{
+ events: make([]event, 0, capacity),
+ stack: []item{{index: -1}}, // include an extra event so file nodes have a parent
+ }
+ for _, file := range files {
+ walk(v, edge.Invalid, -1, file)
}
- events := make([]event, 0, capacity)
+ return v.events
+}
- var stack []event
- stack = append(stack, event{}) // include an extra event so file nodes have a parent
- for _, f := range files {
- ast.Inspect(f, func(n ast.Node) bool {
- if n != nil {
- // push
- ev := event{
- node: n,
- typ: 0, // temporarily used to accumulate type bits of subtree
- index: len(events), // push event temporarily holds own index
- }
- stack = append(stack, ev)
- events = append(events, ev)
- } else {
- // pop
- top := len(stack) - 1
- ev := stack[top]
- typ := typeOf(ev.node)
- push := ev.index
- parent := top - 1
-
- events[push].typ = typ // set type of push
- stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs.
- events[push].index = len(events) // make push refer to pop
-
- stack = stack[:top]
- events = append(events, ev)
- }
- return true
- })
+type visitor struct {
+ events []event
+ stack []item
+}
+
+type item struct {
+ index int32 // index of current node's push event
+ parentIndex int32 // index of parent node's push event
+ typAccum uint64 // accumulated type bits of current node's descendents
+ edgeKindAndIndex int32 // edge.Kind and index, bit packed
+}
+
+func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) {
+ var (
+ index = int32(len(v.events))
+ parentIndex = v.stack[len(v.stack)-1].index
+ )
+ v.events = append(v.events, event{
+ node: node,
+ parent: parentIndex,
+ typ: typeOf(node),
+ index: 0, // (pop index is set later by visitor.pop)
+ })
+ v.stack = append(v.stack, item{
+ index: index,
+ parentIndex: parentIndex,
+ edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex),
+ })
+
+ // 2B nodes ought to be enough for anyone!
+ if int32(len(v.events)) < 0 {
+ panic("event index exceeded int32")
}
- return events
+ // 32M elements in an []ast.Node ought to be enough for anyone!
+ if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex {
+ panic("Node slice index exceeded uint25")
+ }
+}
+
+func (v *visitor) pop(node ast.Node) {
+ top := len(v.stack) - 1
+ current := v.stack[top]
+
+ push := &v.events[current.index]
+ parent := &v.stack[top-1]
+
+ push.index = int32(len(v.events)) // make push event refer to pop
+ parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent
+
+ v.stack = v.stack[:top]
+
+ v.events = append(v.events, event{
+ node: node,
+ typ: current.typAccum,
+ index: current.index,
+ parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex]
+ })
}
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go
index b7e959114..c576dc70a 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/iter.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go
@@ -26,7 +26,7 @@ func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] {
return func(yield func(ast.Node) bool) {
mask := maskOf(types)
- for i := 0; i < len(in.events); {
+ for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
@@ -63,7 +63,7 @@ func All[N interface {
mask := typeOf((N)(nil))
return func(yield func(N) bool) {
- for i := 0; i < len(in.events); {
+ for i := int32(0); i < int32(len(in.events)); {
ev := in.events[i]
if ev.index > i {
// push
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
index 2a872f89d..977844845 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
@@ -12,6 +12,8 @@ package inspector
import (
"go/ast"
"math"
+
+ _ "unsafe"
)
const (
@@ -215,8 +217,9 @@ func typeOf(n ast.Node) uint64 {
return 0
}
+//go:linkname maskOf
func maskOf(nodes []ast.Node) uint64 {
- if nodes == nil {
+ if len(nodes) == 0 {
return math.MaxUint64 // match all node types
}
var mask uint64
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/vendor/golang.org/x/tools/go/ast/inspector/walk.go
new file mode 100644
index 000000000..5a42174a0
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/inspector/walk.go
@@ -0,0 +1,341 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package inspector
+
+// This file is a fork of ast.Inspect to reduce unnecessary dynamic
+// calls and to gather edge information.
+//
+// Consistency with the original is ensured by TestInspectAllNodes.
+
+import (
+ "fmt"
+ "go/ast"
+
+ "golang.org/x/tools/internal/astutil/edge"
+)
+
+func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) {
+ for i, node := range list {
+ walk(v, ek, i, node)
+ }
+}
+
+func walk(v *visitor, ek edge.Kind, index int, node ast.Node) {
+ v.push(ek, index, node)
+
+ // walk children
+ // (the order of the cases matches the order
+ // of the corresponding node types in ast.go)
+ switch n := node.(type) {
+ // Comments and fields
+ case *ast.Comment:
+ // nothing to do
+
+ case *ast.CommentGroup:
+ walkList(v, edge.CommentGroup_List, n.List)
+
+ case *ast.Field:
+ if n.Doc != nil {
+ walk(v, edge.Field_Doc, -1, n.Doc)
+ }
+ walkList(v, edge.Field_Names, n.Names)
+ if n.Type != nil {
+ walk(v, edge.Field_Type, -1, n.Type)
+ }
+ if n.Tag != nil {
+ walk(v, edge.Field_Tag, -1, n.Tag)
+ }
+ if n.Comment != nil {
+ walk(v, edge.Field_Comment, -1, n.Comment)
+ }
+
+ case *ast.FieldList:
+ walkList(v, edge.FieldList_List, n.List)
+
+ // Expressions
+ case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
+ // nothing to do
+
+ case *ast.Ellipsis:
+ if n.Elt != nil {
+ walk(v, edge.Ellipsis_Elt, -1, n.Elt)
+ }
+
+ case *ast.FuncLit:
+ walk(v, edge.FuncLit_Type, -1, n.Type)
+ walk(v, edge.FuncLit_Body, -1, n.Body)
+
+ case *ast.CompositeLit:
+ if n.Type != nil {
+ walk(v, edge.CompositeLit_Type, -1, n.Type)
+ }
+ walkList(v, edge.CompositeLit_Elts, n.Elts)
+
+ case *ast.ParenExpr:
+ walk(v, edge.ParenExpr_X, -1, n.X)
+
+ case *ast.SelectorExpr:
+ walk(v, edge.SelectorExpr_X, -1, n.X)
+ walk(v, edge.SelectorExpr_Sel, -1, n.Sel)
+
+ case *ast.IndexExpr:
+ walk(v, edge.IndexExpr_X, -1, n.X)
+ walk(v, edge.IndexExpr_Index, -1, n.Index)
+
+ case *ast.IndexListExpr:
+ walk(v, edge.IndexListExpr_X, -1, n.X)
+ walkList(v, edge.IndexListExpr_Indices, n.Indices)
+
+ case *ast.SliceExpr:
+ walk(v, edge.SliceExpr_X, -1, n.X)
+ if n.Low != nil {
+ walk(v, edge.SliceExpr_Low, -1, n.Low)
+ }
+ if n.High != nil {
+ walk(v, edge.SliceExpr_High, -1, n.High)
+ }
+ if n.Max != nil {
+ walk(v, edge.SliceExpr_Max, -1, n.Max)
+ }
+
+ case *ast.TypeAssertExpr:
+ walk(v, edge.TypeAssertExpr_X, -1, n.X)
+ if n.Type != nil {
+ walk(v, edge.TypeAssertExpr_Type, -1, n.Type)
+ }
+
+ case *ast.CallExpr:
+ walk(v, edge.CallExpr_Fun, -1, n.Fun)
+ walkList(v, edge.CallExpr_Args, n.Args)
+
+ case *ast.StarExpr:
+ walk(v, edge.StarExpr_X, -1, n.X)
+
+ case *ast.UnaryExpr:
+ walk(v, edge.UnaryExpr_X, -1, n.X)
+
+ case *ast.BinaryExpr:
+ walk(v, edge.BinaryExpr_X, -1, n.X)
+ walk(v, edge.BinaryExpr_Y, -1, n.Y)
+
+ case *ast.KeyValueExpr:
+ walk(v, edge.KeyValueExpr_Key, -1, n.Key)
+ walk(v, edge.KeyValueExpr_Value, -1, n.Value)
+
+ // Types
+ case *ast.ArrayType:
+ if n.Len != nil {
+ walk(v, edge.ArrayType_Len, -1, n.Len)
+ }
+ walk(v, edge.ArrayType_Elt, -1, n.Elt)
+
+ case *ast.StructType:
+ walk(v, edge.StructType_Fields, -1, n.Fields)
+
+ case *ast.FuncType:
+ if n.TypeParams != nil {
+ walk(v, edge.FuncType_TypeParams, -1, n.TypeParams)
+ }
+ if n.Params != nil {
+ walk(v, edge.FuncType_Params, -1, n.Params)
+ }
+ if n.Results != nil {
+ walk(v, edge.FuncType_Results, -1, n.Results)
+ }
+
+ case *ast.InterfaceType:
+ walk(v, edge.InterfaceType_Methods, -1, n.Methods)
+
+ case *ast.MapType:
+ walk(v, edge.MapType_Key, -1, n.Key)
+ walk(v, edge.MapType_Value, -1, n.Value)
+
+ case *ast.ChanType:
+ walk(v, edge.ChanType_Value, -1, n.Value)
+
+ // Statements
+ case *ast.BadStmt:
+ // nothing to do
+
+ case *ast.DeclStmt:
+ walk(v, edge.DeclStmt_Decl, -1, n.Decl)
+
+ case *ast.EmptyStmt:
+ // nothing to do
+
+ case *ast.LabeledStmt:
+ walk(v, edge.LabeledStmt_Label, -1, n.Label)
+ walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt)
+
+ case *ast.ExprStmt:
+ walk(v, edge.ExprStmt_X, -1, n.X)
+
+ case *ast.SendStmt:
+ walk(v, edge.SendStmt_Chan, -1, n.Chan)
+ walk(v, edge.SendStmt_Value, -1, n.Value)
+
+ case *ast.IncDecStmt:
+ walk(v, edge.IncDecStmt_X, -1, n.X)
+
+ case *ast.AssignStmt:
+ walkList(v, edge.AssignStmt_Lhs, n.Lhs)
+ walkList(v, edge.AssignStmt_Rhs, n.Rhs)
+
+ case *ast.GoStmt:
+ walk(v, edge.GoStmt_Call, -1, n.Call)
+
+ case *ast.DeferStmt:
+ walk(v, edge.DeferStmt_Call, -1, n.Call)
+
+ case *ast.ReturnStmt:
+ walkList(v, edge.ReturnStmt_Results, n.Results)
+
+ case *ast.BranchStmt:
+ if n.Label != nil {
+ walk(v, edge.BranchStmt_Label, -1, n.Label)
+ }
+
+ case *ast.BlockStmt:
+ walkList(v, edge.BlockStmt_List, n.List)
+
+ case *ast.IfStmt:
+ if n.Init != nil {
+ walk(v, edge.IfStmt_Init, -1, n.Init)
+ }
+ walk(v, edge.IfStmt_Cond, -1, n.Cond)
+ walk(v, edge.IfStmt_Body, -1, n.Body)
+ if n.Else != nil {
+ walk(v, edge.IfStmt_Else, -1, n.Else)
+ }
+
+ case *ast.CaseClause:
+ walkList(v, edge.CaseClause_List, n.List)
+ walkList(v, edge.CaseClause_Body, n.Body)
+
+ case *ast.SwitchStmt:
+ if n.Init != nil {
+ walk(v, edge.SwitchStmt_Init, -1, n.Init)
+ }
+ if n.Tag != nil {
+ walk(v, edge.SwitchStmt_Tag, -1, n.Tag)
+ }
+ walk(v, edge.SwitchStmt_Body, -1, n.Body)
+
+ case *ast.TypeSwitchStmt:
+ if n.Init != nil {
+ walk(v, edge.TypeSwitchStmt_Init, -1, n.Init)
+ }
+ walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign)
+ walk(v, edge.TypeSwitchStmt_Body, -1, n.Body)
+
+ case *ast.CommClause:
+ if n.Comm != nil {
+ walk(v, edge.CommClause_Comm, -1, n.Comm)
+ }
+ walkList(v, edge.CommClause_Body, n.Body)
+
+ case *ast.SelectStmt:
+ walk(v, edge.SelectStmt_Body, -1, n.Body)
+
+ case *ast.ForStmt:
+ if n.Init != nil {
+ walk(v, edge.ForStmt_Init, -1, n.Init)
+ }
+ if n.Cond != nil {
+ walk(v, edge.ForStmt_Cond, -1, n.Cond)
+ }
+ if n.Post != nil {
+ walk(v, edge.ForStmt_Post, -1, n.Post)
+ }
+ walk(v, edge.ForStmt_Body, -1, n.Body)
+
+ case *ast.RangeStmt:
+ if n.Key != nil {
+ walk(v, edge.RangeStmt_Key, -1, n.Key)
+ }
+ if n.Value != nil {
+ walk(v, edge.RangeStmt_Value, -1, n.Value)
+ }
+ walk(v, edge.RangeStmt_X, -1, n.X)
+ walk(v, edge.RangeStmt_Body, -1, n.Body)
+
+ // Declarations
+ case *ast.ImportSpec:
+ if n.Doc != nil {
+ walk(v, edge.ImportSpec_Doc, -1, n.Doc)
+ }
+ if n.Name != nil {
+ walk(v, edge.ImportSpec_Name, -1, n.Name)
+ }
+ walk(v, edge.ImportSpec_Path, -1, n.Path)
+ if n.Comment != nil {
+ walk(v, edge.ImportSpec_Comment, -1, n.Comment)
+ }
+
+ case *ast.ValueSpec:
+ if n.Doc != nil {
+ walk(v, edge.ValueSpec_Doc, -1, n.Doc)
+ }
+ walkList(v, edge.ValueSpec_Names, n.Names)
+ if n.Type != nil {
+ walk(v, edge.ValueSpec_Type, -1, n.Type)
+ }
+ walkList(v, edge.ValueSpec_Values, n.Values)
+ if n.Comment != nil {
+ walk(v, edge.ValueSpec_Comment, -1, n.Comment)
+ }
+
+ case *ast.TypeSpec:
+ if n.Doc != nil {
+ walk(v, edge.TypeSpec_Doc, -1, n.Doc)
+ }
+ walk(v, edge.TypeSpec_Name, -1, n.Name)
+ if n.TypeParams != nil {
+ walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams)
+ }
+ walk(v, edge.TypeSpec_Type, -1, n.Type)
+ if n.Comment != nil {
+ walk(v, edge.TypeSpec_Comment, -1, n.Comment)
+ }
+
+ case *ast.BadDecl:
+ // nothing to do
+
+ case *ast.GenDecl:
+ if n.Doc != nil {
+ walk(v, edge.GenDecl_Doc, -1, n.Doc)
+ }
+ walkList(v, edge.GenDecl_Specs, n.Specs)
+
+ case *ast.FuncDecl:
+ if n.Doc != nil {
+ walk(v, edge.FuncDecl_Doc, -1, n.Doc)
+ }
+ if n.Recv != nil {
+ walk(v, edge.FuncDecl_Recv, -1, n.Recv)
+ }
+ walk(v, edge.FuncDecl_Name, -1, n.Name)
+ walk(v, edge.FuncDecl_Type, -1, n.Type)
+ if n.Body != nil {
+ walk(v, edge.FuncDecl_Body, -1, n.Body)
+ }
+
+ case *ast.File:
+ if n.Doc != nil {
+ walk(v, edge.File_Doc, -1, n.Doc)
+ }
+ walk(v, edge.File_Name, -1, n.Name)
+ walkList(v, edge.File_Decls, n.Decls)
+ // don't walk n.Comments - they have been
+ // visited already through the individual
+ // nodes
+
+ default:
+ // (includes *ast.Package)
+ panic(fmt.Sprintf("Walk: unexpected node type %T", n))
+ }
+
+ v.pop(node)
+}
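
walk and traverse only build the event table; clients consume it through the package's public API, which is unchanged by this fork. A minimal usage sketch against the public golang.org/x/tools/go/ast/inspector API follows; the parsed source snippet and the expected count are illustrative.

// usage_sketch.go — counting call expressions via Inspector.Preorder.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	src := `package p
func f() { g(); g() }
func g() {}`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})

	// Preorder visits only *ast.CallExpr nodes; the typ masks recorded by
	// traverse/walk let it skip subtrees that cannot contain a match.
	calls := 0
	in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
		calls++
	})
	fmt.Println("calls:", calls) // 2
}
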
diff --git a/vendor/golang.org/x/tools/internal/astutil/edge/edge.go b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go
new file mode 100644
index 000000000..4f6ccfd6e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/astutil/edge/edge.go
@@ -0,0 +1,295 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package edge defines identifiers for each field of an ast.Node
+// struct type that refers to another Node.
+package edge
+
+import (
+ "fmt"
+ "go/ast"
+ "reflect"
+)
+
+// A Kind describes a field of an ast.Node struct.
+type Kind uint8
+
+// String returns a description of the edge kind.
+func (k Kind) String() string {
+ if k == Invalid {
+ return ""
+ }
+ info := fieldInfos[k]
+ return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name)
+}
+
+// NodeType returns the pointer-to-struct type of the ast.Node implementation.
+func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType }
+
+// FieldName returns the name of the field.
+func (k Kind) FieldName() string { return fieldInfos[k].name }
+
+// FieldType returns the declared type of the field.
+func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType }
+
+// Get returns the direct child of n identified by (k, idx).
+// n's type must match k.NodeType().
+// idx must be a valid slice index, or -1 for a non-slice.
+func (k Kind) Get(n ast.Node, idx int) ast.Node {
+ if k.NodeType() != reflect.TypeOf(n) {
+ panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n))
+ }
+ v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index)
+ if idx != -1 {
+ v = v.Index(idx) // asserts valid index
+ } else {
+ // (The type assertion below asserts that v is not a slice.)
+ }
+ return v.Interface().(ast.Node) // may be nil
+}
+
+const (
+ Invalid Kind = iota // for nodes at the root of the traversal
+
+ // Kinds are sorted alphabetically.
+ // Numbering is not stable.
+ // Each is named Type_Field, where Type is the
+ // ast.Node struct type and Field is the name of the field
+
+ ArrayType_Elt
+ ArrayType_Len
+ AssignStmt_Lhs
+ AssignStmt_Rhs
+ BinaryExpr_X
+ BinaryExpr_Y
+ BlockStmt_List
+ BranchStmt_Label
+ CallExpr_Args
+ CallExpr_Fun
+ CaseClause_Body
+ CaseClause_List
+ ChanType_Value
+ CommClause_Body
+ CommClause_Comm
+ CommentGroup_List
+ CompositeLit_Elts
+ CompositeLit_Type
+ DeclStmt_Decl
+ DeferStmt_Call
+ Ellipsis_Elt
+ ExprStmt_X
+ FieldList_List
+ Field_Comment
+ Field_Doc
+ Field_Names
+ Field_Tag
+ Field_Type
+ File_Decls
+ File_Doc
+ File_Name
+ ForStmt_Body
+ ForStmt_Cond
+ ForStmt_Init
+ ForStmt_Post
+ FuncDecl_Body
+ FuncDecl_Doc
+ FuncDecl_Name
+ FuncDecl_Recv
+ FuncDecl_Type
+ FuncLit_Body
+ FuncLit_Type
+ FuncType_Params
+ FuncType_Results
+ FuncType_TypeParams
+ GenDecl_Doc
+ GenDecl_Specs
+ GoStmt_Call
+ IfStmt_Body
+ IfStmt_Cond
+ IfStmt_Else
+ IfStmt_Init
+ ImportSpec_Comment
+ ImportSpec_Doc
+ ImportSpec_Name
+ ImportSpec_Path
+ IncDecStmt_X
+ IndexExpr_Index
+ IndexExpr_X
+ IndexListExpr_Indices
+ IndexListExpr_X
+ InterfaceType_Methods
+ KeyValueExpr_Key
+ KeyValueExpr_Value
+ LabeledStmt_Label
+ LabeledStmt_Stmt
+ MapType_Key
+ MapType_Value
+ ParenExpr_X
+ RangeStmt_Body
+ RangeStmt_Key
+ RangeStmt_Value
+ RangeStmt_X
+ ReturnStmt_Results
+ SelectStmt_Body
+ SelectorExpr_Sel
+ SelectorExpr_X
+ SendStmt_Chan
+ SendStmt_Value
+ SliceExpr_High
+ SliceExpr_Low
+ SliceExpr_Max
+ SliceExpr_X
+ StarExpr_X
+ StructType_Fields
+ SwitchStmt_Body
+ SwitchStmt_Init
+ SwitchStmt_Tag
+ TypeAssertExpr_Type
+ TypeAssertExpr_X
+ TypeSpec_Comment
+ TypeSpec_Doc
+ TypeSpec_Name
+ TypeSpec_Type
+ TypeSpec_TypeParams
+ TypeSwitchStmt_Assign
+ TypeSwitchStmt_Body
+ TypeSwitchStmt_Init
+ UnaryExpr_X
+ ValueSpec_Comment
+ ValueSpec_Doc
+ ValueSpec_Names
+ ValueSpec_Type
+ ValueSpec_Values
+
+ maxKind
+)
+
+// Assert that the encoding fits in 7 bits,
+// as the inspector relies on this.
+// (We are currently at 104.)
+var _ = [1 << 7]struct{}{}[maxKind]
+
+type fieldInfo struct {
+ nodeType reflect.Type // pointer-to-struct type of ast.Node implementation
+ name string
+ index int
+ fieldType reflect.Type
+}
+
+func info[N ast.Node](fieldName string) fieldInfo {
+ nodePtrType := reflect.TypeFor[N]()
+ f, ok := nodePtrType.Elem().FieldByName(fieldName)
+ if !ok {
+ panic(fieldName)
+ }
+ return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type}
+}
+
+var fieldInfos = [...]fieldInfo{
+ Invalid: {},
+ ArrayType_Elt: info[*ast.ArrayType]("Elt"),
+ ArrayType_Len: info[*ast.ArrayType]("Len"),
+ AssignStmt_Lhs: info[*ast.AssignStmt]("Lhs"),
+ AssignStmt_Rhs: info[*ast.AssignStmt]("Rhs"),
+ BinaryExpr_X: info[*ast.BinaryExpr]("X"),
+ BinaryExpr_Y: info[*ast.BinaryExpr]("Y"),
+ BlockStmt_List: info[*ast.BlockStmt]("List"),
+ BranchStmt_Label: info[*ast.BranchStmt]("Label"),
+ CallExpr_Args: info[*ast.CallExpr]("Args"),
+ CallExpr_Fun: info[*ast.CallExpr]("Fun"),
+ CaseClause_Body: info[*ast.CaseClause]("Body"),
+ CaseClause_List: info[*ast.CaseClause]("List"),
+ ChanType_Value: info[*ast.ChanType]("Value"),
+ CommClause_Body: info[*ast.CommClause]("Body"),
+ CommClause_Comm: info[*ast.CommClause]("Comm"),
+ CommentGroup_List: info[*ast.CommentGroup]("List"),
+ CompositeLit_Elts: info[*ast.CompositeLit]("Elts"),
+ CompositeLit_Type: info[*ast.CompositeLit]("Type"),
+ DeclStmt_Decl: info[*ast.DeclStmt]("Decl"),
+ DeferStmt_Call: info[*ast.DeferStmt]("Call"),
+ Ellipsis_Elt: info[*ast.Ellipsis]("Elt"),
+ ExprStmt_X: info[*ast.ExprStmt]("X"),
+ FieldList_List: info[*ast.FieldList]("List"),
+ Field_Comment: info[*ast.Field]("Comment"),
+ Field_Doc: info[*ast.Field]("Doc"),
+ Field_Names: info[*ast.Field]("Names"),
+ Field_Tag: info[*ast.Field]("Tag"),
+ Field_Type: info[*ast.Field]("Type"),
+ File_Decls: info[*ast.File]("Decls"),
+ File_Doc: info[*ast.File]("Doc"),
+ File_Name: info[*ast.File]("Name"),
+ ForStmt_Body: info[*ast.ForStmt]("Body"),
+ ForStmt_Cond: info[*ast.ForStmt]("Cond"),
+ ForStmt_Init: info[*ast.ForStmt]("Init"),
+ ForStmt_Post: info[*ast.ForStmt]("Post"),
+ FuncDecl_Body: info[*ast.FuncDecl]("Body"),
+ FuncDecl_Doc: info[*ast.FuncDecl]("Doc"),
+ FuncDecl_Name: info[*ast.FuncDecl]("Name"),
+ FuncDecl_Recv: info[*ast.FuncDecl]("Recv"),
+ FuncDecl_Type: info[*ast.FuncDecl]("Type"),
+ FuncLit_Body: info[*ast.FuncLit]("Body"),
+ FuncLit_Type: info[*ast.FuncLit]("Type"),
+ FuncType_Params: info[*ast.FuncType]("Params"),
+ FuncType_Results: info[*ast.FuncType]("Results"),
+ FuncType_TypeParams: info[*ast.FuncType]("TypeParams"),
+ GenDecl_Doc: info[*ast.GenDecl]("Doc"),
+ GenDecl_Specs: info[*ast.GenDecl]("Specs"),
+ GoStmt_Call: info[*ast.GoStmt]("Call"),
+ IfStmt_Body: info[*ast.IfStmt]("Body"),
+ IfStmt_Cond: info[*ast.IfStmt]("Cond"),
+ IfStmt_Else: info[*ast.IfStmt]("Else"),
+ IfStmt_Init: info[*ast.IfStmt]("Init"),
+ ImportSpec_Comment: info[*ast.ImportSpec]("Comment"),
+ ImportSpec_Doc: info[*ast.ImportSpec]("Doc"),
+ ImportSpec_Name: info[*ast.ImportSpec]("Name"),
+ ImportSpec_Path: info[*ast.ImportSpec]("Path"),
+ IncDecStmt_X: info[*ast.IncDecStmt]("X"),
+ IndexExpr_Index: info[*ast.IndexExpr]("Index"),
+ IndexExpr_X: info[*ast.IndexExpr]("X"),
+ IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"),
+ IndexListExpr_X: info[*ast.IndexListExpr]("X"),
+ InterfaceType_Methods: info[*ast.InterfaceType]("Methods"),
+ KeyValueExpr_Key: info[*ast.KeyValueExpr]("Key"),
+ KeyValueExpr_Value: info[*ast.KeyValueExpr]("Value"),
+ LabeledStmt_Label: info[*ast.LabeledStmt]("Label"),
+ LabeledStmt_Stmt: info[*ast.LabeledStmt]("Stmt"),
+ MapType_Key: info[*ast.MapType]("Key"),
+ MapType_Value: info[*ast.MapType]("Value"),
+ ParenExpr_X: info[*ast.ParenExpr]("X"),
+ RangeStmt_Body: info[*ast.RangeStmt]("Body"),
+ RangeStmt_Key: info[*ast.RangeStmt]("Key"),
+ RangeStmt_Value: info[*ast.RangeStmt]("Value"),
+ RangeStmt_X: info[*ast.RangeStmt]("X"),
+ ReturnStmt_Results: info[*ast.ReturnStmt]("Results"),
+ SelectStmt_Body: info[*ast.SelectStmt]("Body"),
+ SelectorExpr_Sel: info[*ast.SelectorExpr]("Sel"),
+ SelectorExpr_X: info[*ast.SelectorExpr]("X"),
+ SendStmt_Chan: info[*ast.SendStmt]("Chan"),
+ SendStmt_Value: info[*ast.SendStmt]("Value"),
+ SliceExpr_High: info[*ast.SliceExpr]("High"),
+ SliceExpr_Low: info[*ast.SliceExpr]("Low"),
+ SliceExpr_Max: info[*ast.SliceExpr]("Max"),
+ SliceExpr_X: info[*ast.SliceExpr]("X"),
+ StarExpr_X: info[*ast.StarExpr]("X"),
+ StructType_Fields: info[*ast.StructType]("Fields"),
+ SwitchStmt_Body: info[*ast.SwitchStmt]("Body"),
+ SwitchStmt_Init: info[*ast.SwitchStmt]("Init"),
+ SwitchStmt_Tag: info[*ast.SwitchStmt]("Tag"),
+ TypeAssertExpr_Type: info[*ast.TypeAssertExpr]("Type"),
+ TypeAssertExpr_X: info[*ast.TypeAssertExpr]("X"),
+ TypeSpec_Comment: info[*ast.TypeSpec]("Comment"),
+ TypeSpec_Doc: info[*ast.TypeSpec]("Doc"),
+ TypeSpec_Name: info[*ast.TypeSpec]("Name"),
+ TypeSpec_Type: info[*ast.TypeSpec]("Type"),
+ TypeSpec_TypeParams: info[*ast.TypeSpec]("TypeParams"),
+ TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"),
+ TypeSwitchStmt_Body: info[*ast.TypeSwitchStmt]("Body"),
+ TypeSwitchStmt_Init: info[*ast.TypeSwitchStmt]("Init"),
+ UnaryExpr_X: info[*ast.UnaryExpr]("X"),
+ ValueSpec_Comment: info[*ast.ValueSpec]("Comment"),
+ ValueSpec_Doc: info[*ast.ValueSpec]("Doc"),
+ ValueSpec_Names: info[*ast.ValueSpec]("Names"),
+ ValueSpec_Type: info[*ast.ValueSpec]("Type"),
+ ValueSpec_Values: info[*ast.ValueSpec]("Values"),
+}
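
Kind.Get and info boil down to ordinary reflect lookups on the pointer-to-struct node types. Because the edge package is internal to x/tools and cannot be imported from outside, the sketch below re-creates the same by-name field lookup for a single node type; childByField and the sample source are illustrative, not part of the vendored code.

// edge_sketch.go — field lookup on an ast.Node via reflection, as Kind.Get does.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"reflect"
)

// childByField returns the ast.Node stored in the named field of n,
// indexing into a slice field when idx >= 0 (idx == -1 for non-slice fields).
func childByField(n ast.Node, field string, idx int) ast.Node {
	v := reflect.ValueOf(n).Elem().FieldByName(field)
	if idx >= 0 {
		v = v.Index(idx)
	}
	if v.IsNil() {
		return nil
	}
	return v.Interface().(ast.Node)
}

func main() {
	src := `package p
func f() { if x := 1; x > 0 { println(x) } }`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		if ifStmt, ok := n.(*ast.IfStmt); ok {
			fmt.Printf("Cond: %T\n", childByField(ifStmt, "Cond", -1)) // *ast.BinaryExpr
		}
		return true
	})
}
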
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3a9568d69..4d0e76eb1 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -118,7 +118,7 @@ github.com/Azure/go-autorest/tracing
github.com/Azure/msi-dataplane/pkg/dataplane
github.com/Azure/msi-dataplane/pkg/dataplane/internal/challenge
github.com/Azure/msi-dataplane/pkg/dataplane/internal/client
-# github.com/AzureAD/microsoft-authentication-library-for-go v1.4.0
+# github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1
## explicit; go 1.18
github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache
github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential
@@ -699,9 +699,8 @@ golang.org/x/crypto/pkcs12/internal/rc2
golang.org/x/crypto/salsa20/salsa
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
-## explicit; go 1.20
-golang.org/x/exp/constraints
+# golang.org/x/exp v0.0.0-20250228200357-dead58393ab7
+## explicit; go 1.23.0
golang.org/x/exp/slices
# golang.org/x/net v0.35.0
## explicit; go 1.18
@@ -771,10 +770,11 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.10.0
## explicit; go 1.18
golang.org/x/time/rate
-# golang.org/x/tools v0.28.0
+# golang.org/x/tools v0.30.0
## explicit; go 1.22.0
golang.org/x/tools/cover
golang.org/x/tools/go/ast/inspector
+golang.org/x/tools/internal/astutil/edge
# google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de
## explicit; go 1.19
# google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28