diff --git a/README.md b/README.md
index fdc6ed95..933c266b 100644
--- a/README.md
+++ b/README.md
@@ -42,7 +42,13 @@ Prometheus-kafka-adapter listens for metrics coming from Prometheus and sends th
 - `KAFKA_TOPIC`: defines kafka topic to be used, defaults to `metrics`. Can use a Go template; labels are passed (as a map) to the template, e.g. `metrics.{{ index . "__name__" }}` to use a per-metric topic. Two template functions are available: replace (`{{ index . "__name__" | replace "message" "msg" }}`) and substring (`{{ index . "__name__" | substring 0 5 }}`)
 - `KAFKA_COMPRESSION`: defines the compression type to be used, defaults to `none`.
 - `KAFKA_BATCH_NUM_MESSAGES`: defines the number of messages to batch write, defaults to `10000`.
-- `SERIALIZATION_FORMAT`: defines the serialization format, can be `json`, `avro-json`, defaults to `json`.
+- `KAFKA_BATCH_SIZE`: defines the maximum size (in bytes) of all messages batched in one MessageSet, including protocol framing overhead, defaults to `1000000`.
+- `KAFKA_LINGER_MS`: defines the delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches, defaults to `5`.
+- `SERIALIZATION_FORMAT`: defines the serialization format, can be `json`, `avro-json` or `avro-schema-registry`, defaults to `json`.
+- `SCHEMA_REGISTRY_URL`: defines the schema registry URL to be used, only used if `SERIALIZATION_FORMAT=avro-schema-registry`.
+- `SCHEMA_REGISTRY_USERNAME`: defines the schema registry username to be used, only used if `SERIALIZATION_FORMAT=avro-schema-registry`.
+- `SCHEMA_REGISTRY_PASSWORD`: defines the schema registry password to be used, only used if `SERIALIZATION_FORMAT=avro-schema-registry`.
+- `SCHEMA_REGISTRY_AUTO_REGISTRY_SCHEMAS`: defines whether schemas are automatically registered in the schema registry, only used if `SERIALIZATION_FORMAT=avro-schema-registry`, defaults to `false`.
 - `PORT`: defines the http port to listen on, defaults to `8080`, used directly by [gin](https://github.com/gin-gonic/gin).
 - `BASIC_AUTH_USERNAME`: basic auth username to be used for receive endpoint, default is no basic auth.
 - `BASIC_AUTH_PASSWORD`: basic auth password to be used for receive endpoint, default is no basic auth.
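A note on the topic templating documented above, since it is the one option with non-obvious behaviour: the metric's labels are handed to a Go `text/template` as a map, and `replace`/`substring` are exposed as template functions. The sketch below is a minimal reconstruction of that pipeline; the two helper implementations are assumptions for illustration (only the function names and call syntax come from the README), not the adapter's actual code.

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// The adapter registers the two documented helpers as template functions.
	// These implementations are illustrative assumptions, not the adapter's code.
	funcs := template.FuncMap{
		// usage: {{ index . "__name__" | replace "message" "msg" }}
		"replace": func(old, new, s string) string { return strings.ReplaceAll(s, old, new) },
		// usage: {{ index . "__name__" | substring 0 5 }}
		"substring": func(start, end int, s string) string {
			if end > len(s) {
				end = len(s)
			}
			return s[start:end]
		},
	}

	// A per-metric topic template, as documented for KAFKA_TOPIC.
	tmpl := template.Must(template.New("topic").Funcs(funcs).
		Parse(`metrics.{{ index . "__name__" | substring 0 5 }}`))

	// The sample's labels are passed to the template as a map.
	labels := map[string]string{"__name__": "http_requests_total", "job": "api"}
	_ = tmpl.Execute(os.Stdout, labels) // prints "metrics.http_"
}
```

In a pipeline like `{{ index . "__name__" | substring 0 5 }}`, Go's `text/template` passes the piped value as the final argument, which is why the helpers take the subject string last.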
diff --git a/config.go b/config.go
index 1b4da23c..d35ac324 100644
--- a/config.go
+++ b/config.go
@@ -17,6 +17,7 @@ package main
 import (
 	"fmt"
 	"os"
+	"strconv"
 	"strings"
 	"text/template"
@@ -28,25 +29,31 @@ import (
 )
 
 var (
-	kafkaBrokerList        = "kafka:9092"
-	kafkaTopic             = "metrics"
-	topicTemplate          *template.Template
-	match                  = make(map[string]*dto.MetricFamily, 0)
-	basicauth              = false
-	basicauthUsername      = ""
-	basicauthPassword      = ""
-	kafkaCompression       = "none"
-	kafkaBatchNumMessages  = "10000"
-	kafkaSslClientCertFile = ""
-	kafkaSslClientKeyFile  = ""
-	kafkaSslClientKeyPass  = ""
-	kafkaSslCACertFile     = ""
-	kafkaSecurityProtocol  = ""
-	kafkaSaslMechanism     = ""
-	kafkaSaslUsername      = ""
-	kafkaSaslPassword      = ""
-	serializer             Serializer
-	kafkaAcks              = "all"
+	kafkaBrokerList                   = "kafka:9092"
+	kafkaTopic                        = "metrics"
+	topicTemplate                     *template.Template
+	match                             = make(map[string]*dto.MetricFamily, 0)
+	basicauth                         = false
+	basicauthUsername                 = ""
+	basicauthPassword                 = ""
+	kafkaCompression                  = "none"
+	kafkaBatchNumMessages             = "10000"
+	kafkaBatchSize                    = "1000000"
+	kafkaLingerMs                     = "5"
+	kafkaSslClientCertFile            = ""
+	kafkaSslClientKeyFile             = ""
+	kafkaSslClientKeyPass             = ""
+	kafkaSslCACertFile                = ""
+	kafkaSecurityProtocol             = ""
+	kafkaSaslMechanism                = ""
+	kafkaSaslUsername                 = ""
+	kafkaSaslPassword                 = ""
+	serializer                        Serializer
+	kafkaAcks                         = "all"
+	schemaRegistryUrl                 = ""
+	schemaRegistryUsername            = ""
+	schemaRegistryPassword            = ""
+	schemaRegistryAutoRegisterSchemas = false
 )
 
 func init() {
@@ -82,6 +89,14 @@ func init() {
 		kafkaBatchNumMessages = value
 	}
 
+	if value := os.Getenv("KAFKA_BATCH_SIZE"); value != "" {
+		kafkaBatchSize = value
+	}
+
+	if value := os.Getenv("KAFKA_LINGER_MS"); value != "" {
+		kafkaLingerMs = value
+	}
+
 	if value := os.Getenv("KAFKA_SSL_CLIENT_CERT_FILE"); value != "" {
 		kafkaSslClientCertFile = value
 	}
@@ -99,7 +114,7 @@ func init() {
 	}
 
 	if value := os.Getenv("KAFKA_SECURITY_PROTOCOL"); value != "" {
-		kafkaSecurityProtocol = strings.ToLower(value)
+		kafkaSecurityProtocol = value
 	}
 
 	if value := os.Getenv("KAFKA_SASL_MECHANISM"); value != "" {
@@ -125,6 +140,24 @@ func init() {
 		match = matchList
 	}
 
+	if value := os.Getenv("SCHEMA_REGISTRY_URL"); value != "" {
+		schemaRegistryUrl = value
+	}
+	if value := os.Getenv("SCHEMA_REGISTRY_USERNAME"); value != "" {
+		schemaRegistryUsername = value
+	}
+	if value := os.Getenv("SCHEMA_REGISTRY_PASSWORD"); value != "" {
+		schemaRegistryPassword = value
+	}
+	if value := os.Getenv("SCHEMA_REGISTRY_AUTO_REGISTRY_SCHEMAS"); value != "" {
+		v, err := strconv.ParseBool(value)
+		if err != nil {
+			logrus.WithError(err).Errorln("couldn't parse SCHEMA_REGISTRY_AUTO_REGISTRY_SCHEMAS to bool, using false")
+			v = false
+		}
+		schemaRegistryAutoRegisterSchemas = v
+	}
+
 	var err error
 	serializer, err = parseSerializationFormat(os.Getenv("SERIALIZATION_FORMAT"))
 	if err != nil {
@@ -175,6 +208,8 @@ func parseSerializationFormat(value string) (Serializer, error) {
 		return NewJSONSerializer()
 	case "avro-json":
 		return NewAvroJSONSerializer("schemas/metric.avsc")
+	case "avro-schema-registry":
+		return NewAvroSchemaRegistrySerializer(schemaRegistryUrl, schemaRegistryUsername, schemaRegistryPassword)
 	default:
 		logrus.WithField("serialization-format-value", value).Warningln("invalid serialization format, using json")
 		return NewJSONSerializer()
diff --git a/go.mod b/go.mod
index 15895fe6..5e077275 100644
--- a/go.mod
+++ b/go.mod
@@ -3,18 +3,18 @@ module github.com/Telefonica/prometheus-kafka-adapter
 
 go 1.22.3
 
 require (
-	github.com/confluentinc/confluent-kafka-go v1.9.2
+	github.com/confluentinc/confluent-kafka-go/v2 v2.6.1
 	github.com/gin-gonic/contrib v0.0.0-20240508051311-c1c6bf0061b0
 	github.com/gin-gonic/gin v1.10.0
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/linkedin/goavro v2.1.0+incompatible
-	github.com/prometheus/client_golang v1.19.1
+	github.com/prometheus/client_golang v1.20.5
 	github.com/prometheus/client_model v0.6.1
-	github.com/prometheus/common v0.53.0
-	github.com/prometheus/prometheus v0.52.1
+	github.com/prometheus/common v0.61.0
+	github.com/prometheus/prometheus v0.300.1
 	github.com/sirupsen/logrus v1.9.3
-	github.com/stretchr/testify v1.9.0
+	github.com/stretchr/testify v1.10.0
 	gopkg.in/yaml.v2 v2.4.0
 )
 
@@ -32,23 +32,30 @@ require (
 	github.com/go-playground/universal-translator v0.18.1 // indirect
 	github.com/go-playground/validator/v10 v10.20.0 // indirect
 	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
+	github.com/hamba/avro/v2 v2.24.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/compress v1.17.10 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+	github.com/kr/text v0.2.0 // indirect
 	github.com/leodido/go-urn v1.4.0 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/moby/sys/userns v0.1.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
 	github.com/ugorji/go/codec v1.2.12 // indirect
 	golang.org/x/arch v0.8.0 // indirect
-	golang.org/x/crypto v0.23.0 // indirect
-	golang.org/x/net v0.25.0 // indirect
-	golang.org/x/sys v0.20.0 // indirect
-	golang.org/x/text v0.15.0 // indirect
-	google.golang.org/protobuf v1.34.1 // indirect
+	golang.org/x/crypto v0.30.0 // indirect
+	golang.org/x/net v0.32.0 // indirect
+	golang.org/x/sys v0.28.0 // indirect
+	golang.org/x/text v0.21.0 // indirect
+	google.golang.org/protobuf v1.35.2 // indirect
 	gopkg.in/linkedin/goavro.v1 v1.0.5 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/go.sum b/go.sum
index 13b710fa..83ea8c03 100644
--- a/go.sum
+++ b/go.sum
@@ -1,61 +1,167 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/actgardner/gogen-avro/v10 v10.1.0/go.mod h1:o+ybmVjEa27AAr35FRqU98DJu1fXES56uXniYFv4yDA=
-github.com/actgardner/gogen-avro/v10 v10.2.1/go.mod h1:QUhjeHPchheYmMDni/Nx7VB0RsT/ee8YIgGY/xpEQgQ=
-github.com/actgardner/gogen-avro/v9 v9.1.0/go.mod h1:nyTj6wPqDJoxM3qdnjcLv+EnMDSDFqE0qDpva2QRmKc=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw=
+cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM=
+cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
+cloud.google.com/go/auth/oauth2adapt
v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= +github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/hcsshim v0.11.5 h1:haEcLNpj9Ka1gd3B3tAEs9CpE0c+1IhoL59w/exYU38= +github.com/Microsoft/hcsshim v0.11.5/go.mod h1:MV8xMfmECjl5HdO7U/3/hFVnkmSBjAjmA09d4bExKcU= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/aws/aws-sdk-go-v2 v1.26.1 
h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= +github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= +github.com/aws/aws-sdk-go-v2/config v1.27.10 h1:PS+65jThT0T/snC5WjyfHHyUgG+eBoupSDV+f838cro= +github.com/aws/aws-sdk-go-v2/config v1.27.10/go.mod h1:BePM7Vo4OBpHreKRUMuDXX+/+JWP38FLkzl5m27/Jjs= +github.com/aws/aws-sdk-go-v2/credentials v1.17.10 h1:qDZ3EA2lv1KangvQB6y258OssCHD0xvaGiEDkG4X/10= +github.com/aws/aws-sdk-go-v2/credentials v1.17.10/go.mod h1:6t3sucOaYDwDssHQa0ojH1RpmVmF5/jArkye1b2FKMI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= +github.com/aws/aws-sdk-go-v2/service/kms v1.30.1 h1:SBn4I0fJXF9FYOVRSVMWuhvEKoAHDikjGpS3wlmw5DE= +github.com/aws/aws-sdk-go-v2/service/kms v1.30.1/go.mod h1:2snWQJQUKsbN66vAawJuOGX7dr37pfOq9hb0tZDGIqQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.4 h1:WzFol5Cd+yDxPAdnzTA5LmpHYSWinhmSj4rQChV0ee8= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.4/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/buger/goterm v1.0.4 h1:Z9YvGmOih81P0FbVtEYTFF6YsSgxSUKEhf/f9bTMXbY= +github.com/buger/goterm v1.0.4/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0= github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4= github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM= github.com/bytedance/sonic/loader v0.1.1/go.mod 
h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/confluentinc/confluent-kafka-go v1.9.2 h1:gV/GxhMBUb03tFWkN+7kdhg+zf+QUM+wVkI9zwh770Q= -github.com/confluentinc/confluent-kafka-go v1.9.2/go.mod h1:ptXNqsuDfYbAE/LBW6pnwWZElUoWxHoV8E43DCrliyo= +github.com/compose-spec/compose-go/v2 v2.1.3 h1:bD67uqLuL/XgkAK6ir3xZvNLFPxPScEi1KW7R5esrLE= +github.com/compose-spec/compose-go/v2 v2.1.3/go.mod h1:lFN0DrMxIncJGYAXTfWuajfwj5haBJqrBkarHcnjJKc= +github.com/confluentinc/confluent-kafka-go/v2 v2.6.1 h1:XFkytnGvk/ZcY2qU0ql4E4h+ftBaGqkLO7tlZ4kRbr4= +github.com/confluentinc/confluent-kafka-go/v2 v2.6.1/go.mod h1:hScqtFIGUI1wqHIgM3mjoqEou4VweGGGX7dMpcUKves= +github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= +github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= +github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao= +github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4= +github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= +github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= +github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod 
h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU= +github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= +github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= +github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= +github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E= +github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= -github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= -github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/buildx v0.15.1 h1:1cO6JIc0rOoC8tlxfXoh1HH1uxaNvYH1q7J7kv5enhw= +github.com/docker/buildx v0.15.1/go.mod h1:16DQgJqoggmadc1UhLaUTPqKtR+PlByN/kyXFdkhFCo= +github.com/docker/cli v27.0.3+incompatible h1:usGs0/BoBW8MWxGeEtqPMkzOY56jZ6kYlSN5BLDioCQ= +github.com/docker/cli v27.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/compose/v2 v2.28.1 h1:ORPfiVHrpnRQBDoC3F8JJyWAY8N5gWuo3FgwyivxFdM= +github.com/docker/compose/v2 v2.28.1/go.mod h1:wDtGQFHe99sPLCHXeVbCkc+Wsl4Y/2ZxiAJa/nga6rA= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker 
v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= +github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= +github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0= +github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203 h1:XBBHcIb256gUJtLmY22n99HaZTz+r2Z51xUPi01m3wg= +github.com/eiannone/keyboard v0.0.0-20220611211555-0d226195f203/go.mod h1:E1jcSv8FaEny+OP/5k9UxZVw9YFWGj7eI4KR/iOBqCg= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsevents v0.2.0 h1:BRlvlqjvNTfogHfeBOFvSC9N0Ddy+wzQCQukyoD7o/c= +github.com/fsnotify/fsevents v0.2.0/go.mod h1:B3eEk39i4hz8y1zaWS/wPrAP4O6wkIl7HQwKBr1qH/w= +github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo= +github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/contrib v0.0.0-20240508051311-c1c6bf0061b0 h1:EUFmvQ8ffefnSAmaUZd9HZYZSw9w/bFjp3FiNaJ5WmE= github.com/gin-gonic/contrib v0.0.0-20240508051311-c1c6bf0061b0/go.mod h1:iqneQ2Df3omzIVTkIfn7c1acsVnMGiSLn4XF5Blh3Yg= github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E= +github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= @@ -64,265 +170,406 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8= github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc= +github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= +github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hamba/avro v1.5.6/go.mod h1:3vNT0RLXXpFm2Tb/5KC71ZRJlOroggq1Rcitb6k4Fr8= -github.com/heetch/avro v0.3.1/go.mod h1:4xn38Oz/+hiEUTpbVfGVLfvOg0yKLlRP7Q9+gJJILgA= -github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= -github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= -github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= -github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/gopoet v0.1.0/go.mod 
h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= -github.com/jhump/protoreflect v1.11.0/go.mod h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= -github.com/jhump/protoreflect v1.12.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= +github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/hamba/avro/v2 v2.24.0 h1:axTlaYDkcSY0dVekRSy8cdrsj5MG86WqosUQacKCids= +github.com/hamba/avro/v2 v2.24.0/go.mod h1:7vDfy/2+kYCE8WUHoj2et59GTv0ap7ptktMXu0QHePI= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= 
+github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= +github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/vault/api v1.15.0 h1:O24FYQCWwhwKnF7CuSqP30S51rTV7vz1iACXE/pj5DA= +github.com/hashicorp/vault/api v1.15.0/go.mod h1:+5YTO09JGn0u+b6ySD/LLVf8WkJCPLAL2Vkmrn2+CM8= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= +github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 
-github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/linkedin/goavro v2.1.0+incompatible h1:DV2aUlj2xZiuxQyvag8Dy7zjY69ENjS66bWkSfdpddY= github.com/linkedin/goavro v2.1.0+incompatible/go.mod h1:bBCwI2eGYpUI/4820s67MElg9tdeLbINjLjiM2xZFYM= -github.com/linkedin/goavro/v2 v2.10.0/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= -github.com/linkedin/goavro/v2 v2.10.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= -github.com/linkedin/goavro/v2 v2.11.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/buildkit v0.14.1 h1:2epLCZTkn4CikdImtsLtIa++7DzCimrrZCT1sway+oI= 
+github.com/moby/buildkit v0.14.1/go.mod h1:1XssG7cAqv5Bz1xcGMxJL123iCv5TYN4Z/qf647gfuk= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= +github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= +github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= +github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= +github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= +github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/nrwiersma/avro-benchmarks v0.0.0-20210913175520-21aec48c8f76/go.mod h1:iKyFMidsk/sVYONJRE372sJuX/QTRPacU7imPqqsu7g= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/opencontainers/go-digest 
v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= -github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/prometheus/prometheus v0.52.1 h1:BrQ29YG+mzdGh8DgHPirHbeMGNqtL+INe0rqg7ttBJ4= -github.com/prometheus/prometheus v0.52.1/go.mod h1:3z74cVsmVH0iXOR5QBjB7Pa6A0KJeEAK5A6UsmAFb1g= -github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod 
h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prometheus v0.300.1 h1:9KKcTTq80gkzmXW0Et/QCFSrBPgmwiS3Hlcxc6o8KlM= +github.com/prometheus/prometheus v0.300.1/go.mod h1:gtTPY/XVyCdqqnjA3NzDMb0/nc5H9hOu1RMame+gHyM= +github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc h1:zAsgcP8MhzAbhMnB1QQ2O7ZhWYVGYSR2iVcjzQuPV+o= +github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc/go.mod h1:S8xSOnV3CgpNrWd0GQ/OoQfMtlg2uPRSuTzcSGrzwK8= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= +github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= +github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b h1:h+3JX2VoWTFuyQEo87pStk/a99dzIO1mM9KxIyLPGTU= +github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx 
v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/testcontainers/testcontainers-go v0.33.0 h1:zJS9PfXYT5O0ZFXM2xxXfk4J5UMw/kRiISng037Gxdw= +github.com/testcontainers/testcontainers-go v0.33.0/go.mod h1:W80YpTa8D5C3Yy16icheD01UTDu+LmXIA2Keo+jWtT8= +github.com/testcontainers/testcontainers-go/modules/compose v0.33.0 h1:PyrUOF+zG+xrS3p+FesyVxMI+9U+7pwhZhyFozH3jKY= +github.com/testcontainers/testcontainers-go/modules/compose v0.33.0/go.mod h1:oqZaUnFEskdZriO51YBquku/jhgzoXHPot6xe1DqKV4= +github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c= +github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw= +github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375 h1:QB54BJwA6x8QU9nHY3xJSZR2kX9bgpZekRKGkLTmEXA= +github.com/tilt-dev/fsnotify v1.4.8-0.20220602155310-fff9c274a375/go.mod h1:xRroudyp5iVtxKqZCrA6n2TLFRBf8bmnjr1UD4x+z7g= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.1.0 h1:A/2tIdYXqUuVZeWy0Yq/PWKsXgebzMyh5mLbpNEMVUo= +github.com/tink-crypto/tink-go-gcpkms/v2 v2.1.0/go.mod h1:QXPc/i5yUEWWZ4lbe2WOam1kDdrXjGHRjl0Lzo7IQDU= +github.com/tink-crypto/tink-go-hcvault/v2 v2.1.0 h1:REG5YX2omhgPmiIT7GLqmzWFnIksZsog1FHJ+Pi1xJE= +github.com/tink-crypto/tink-go-hcvault/v2 v2.1.0/go.mod h1:OJLS+EYJo/BTViJj7EBG5deKLeQfYwVNW8HMS1qHAAo= +github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok= +github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/tonistiigi/fsutil v0.0.0-20240424095704-91a3fc46842c h1:+6wg/4ORAbnSoGDzg2Q1i3CeMcT/jjhye/ZfnBHy7/M= +github.com/tonistiigi/fsutil v0.0.0-20240424095704-91a3fc46842c/go.mod h1:vbbYqJlnswsbJqWUcJN8fKtBhnEgldDrcagTgnBVKKM= +github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0= +github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= 
+github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab h1:H6aJ0yKQ0gF49Qb2z5hI1UHxSQt4JMyxebFR15KnApw= +github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiatechs/jsonata-go v1.8.5 h1:m1NaokPKD6LPaTPRl674EQz5mpkJvM3ymjdReDEP6/A= +github.com/xiatechs/jsonata-go v1.8.5/go.mod h1:yGEvviiftcdVfhSRhRSpgyTel89T58f+690iB0fp2Vk= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 h1:NmnYCiR0qNufkldjVvyQfZTHSdzeHoZ41zggMsdMcLM= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0/go.mod h1:UVAO61+umUsHLtYb8KXXRoHtxUkdOPkYidzW3gipRLQ= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 h1:wNMDy/LVGLj2h3p6zg4d0gypKfWKSWI14E1C4smOgl8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0/go.mod h1:YfbDdXAAkemWJK3H/DshvlrxqFB2rtW4rY6ky/3x/H0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0 h1:m0yTiGDLUvVYaTFbAvCkVYIYcvwKt3G7OLoN77NUs/8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.30.0/go.mod h1:wBQbT4UekBfegL2nx0Xk1vBcnzyBPsIVm9hRG4fYcr4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0= +go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc= golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY= +golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp 
v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20220503193339-ba3ae3f07e29/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/avro.v0 v0.0.0-20171217001914-a730b5802183/go.mod h1:FvqrFXt+jCsyQibeRv4xxEJBL5iG2DDW5aeJwzDiq4A= +google.golang.org/api v0.199.0 
h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= +google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= +google.golang.org/genproto v0.0.0-20240325203815-454cdb8f5daa h1:ePqxpG3LVx+feAUOx8YmR5T7rc0rdzK8DyxM8cQ9zq0= +google.golang.org/genproto v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:CnZenrTdRJb7jc+jOm0Rkywq+9wh0QC4U8tyiRbEPPM= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= +gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v1 v1.0.0/go.mod h1:CxwszS/Xz1C49Ucd2i6Zil5UToP1EmyrFhKaMVbg1mk= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/httprequest.v1 v1.2.1/go.mod h1:x2Otw96yda5+8+6ZeWwHIJTFkEHWP/qP8pJOzqEtWPM= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/linkedin/goavro.v1 v1.0.5 h1:BJa69CDh0awSsLUmZ9+BowBdokpduDZSM9Zk8oKHfN4= gopkg.in/linkedin/goavro.v1 v1.0.5/go.mod h1:Aw5GdAbizjOEl0kAMHV9iHmA8reZzW/OKuJAl4Hb9F0= -gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/retry.v1 v1.0.3/go.mod h1:FJkXmWiMaAo7xB+xhvDF59zhfjDWyzmyAxiT4dB688g= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +tags.cncf.io/container-device-interface v0.7.2 h1:MLqGnWfOr1wB7m08ieI4YJ3IoLKKozEnnNYBtacDPQU= +tags.cncf.io/container-device-interface v0.7.2/go.mod h1:Xb1PvXv2BhfNb3tla4r9JL129ck1Lxv9KuU6eVOfKto= diff --git a/handlers.go b/handlers.go index 28d22225..c7be8e22 100644 --- a/handlers.go +++ b/handlers.go @@ -16,13 +16,14 @@ package main import ( "fmt" + "github.com/confluentinc/confluent-kafka-go/v2/kafka" "io/ioutil" "net/http" + "time" "github.com/gin-gonic/gin" "github.com/sirupsen/logrus" - "github.com/confluentinc/confluent-kafka-go/kafka" "github.com/golang/snappy" "github.com/gogo/protobuf/proto" @@ -75,7 +76,37 @@ func receiveHandler(producer *kafka.Producer, serializer Serializer) func(c *gin Value: metric, }, nil) + // Handle delivery reports and producer errors emitted on the Events() channel. + go func() { + for event := range producer.Events() { + switch ev := event.(type) { + case *kafka.Message: + message := ev + if message.TopicPartition.Error != nil { + logrus.WithError(message.TopicPartition.Error).Errorf("failed to deliver message: %v", + message.TopicPartition) + } else { + logrus.Debugf("delivered to topic %s [%d] at offset %v", + *message.TopicPartition.Topic, + message.TopicPartition.Partition, + message.TopicPartition.Offset) + } + case kafka.Error: + logrus.WithError(ev).Errorf("Error: %v", ev) + default: + logrus.Infof("Ignored event: %s", ev) + } + } + }() + if err != nil { + if err.(kafka.Error).Code() == kafka.ErrQueueFull { + // Producer queue is full, wait 1s for messages to be delivered. + // Maybe we should fail fast instead, since we are losing data.
+ logrus.Warning("producer queue is full, waiting 1s") + time.Sleep(time.Second) + } + objectsFailed.Add(float64(1)) c.AbortWithStatus(http.StatusInternalServerError) logrus.WithError(err).Debug(fmt.Sprintf("Failing metric %v", metric)) diff --git a/main.go b/main.go index 3553be6d..5092d4e9 100644 --- a/main.go +++ b/main.go @@ -15,9 +15,9 @@ package main import ( + "github.com/confluentinc/confluent-kafka-go/v2/kafka" "time" - "github.com/confluentinc/confluent-kafka-go/kafka" "github.com/gin-gonic/contrib/ginrus" "github.com/gin-gonic/gin" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -28,12 +28,15 @@ func main() { logrus.Info("creating kafka producer") kafkaConfig := kafka.ConfigMap{ - "bootstrap.servers": kafkaBrokerList, - "compression.codec": kafkaCompression, - "batch.num.messages": kafkaBatchNumMessages, - "go.batch.producer": true, // Enable batch producer (for increased performance). - "go.delivery.reports": false, // per-message delivery reports to the Events() channel - "acks": kafkaAcks, + "bootstrap.servers": kafkaBrokerList, + "compression.codec": kafkaCompression, + "batch.num.messages": kafkaBatchNumMessages, + "batch.size": kafkaBatchSize, + "linger.ms": kafkaLingerMs, + "go.batch.producer": true, // Enable batch producer (for increased performance). + "go.delivery.reports": true, // per-message delivery reports to the Events() channel + "go.logs.channel.enable": true, + "acks": kafkaAcks, } if kafkaSslClientCertFile != "" && kafkaSslClientKeyFile != "" && kafkaSslCACertFile != "" { @@ -41,7 +44,7 @@ func main() { kafkaSecurityProtocol = "ssl" } - if kafkaSecurityProtocol != "ssl" && kafkaSecurityProtocol != "sasl_ssl" { + if kafkaSecurityProtocol != "ssl" && kafkaSecurityProtocol != "SASL_SSL" { logrus.Fatal("invalid config: kafka security protocol is not ssl based but ssl config is provided") } @@ -53,7 +56,7 @@ func main() { } if kafkaSaslMechanism != "" && kafkaSaslUsername != "" && kafkaSaslPassword != "" { - if kafkaSecurityProtocol != "sasl_ssl" && kafkaSecurityProtocol != "sasl_plaintext" { + if kafkaSecurityProtocol != "SASL_SSL" && kafkaSecurityProtocol != "SASL_PLAINTEXT" { logrus.Fatal("invalid config: kafka security protocol is not sasl based but sasl config is provided") } diff --git a/serializers.go b/serializers.go index e75e86dd..0667832b 100644 --- a/serializers.go +++ b/serializers.go @@ -17,6 +17,7 @@ package main import ( "bytes" "encoding/json" + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2" "io/ioutil" "strconv" "time" @@ -26,11 +27,21 @@ import ( "github.com/sirupsen/logrus" "github.com/linkedin/goavro" + + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry" + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde" ) // Serializer represents an abstract metrics serializer type Serializer interface { - Marshal(metric map[string]interface{}) ([]byte, error) + Marshal(metric map[string]interface{}, topic string) ([]byte, error) +} + +type Metric struct { + Timestamp string `json:"timestamp" avro:"timestamp"` + Value string `json:"value" avro:"value"` + Name string `json:"name" avro:"name"` + Labels map[string]string `json:"labels" avro:"labels"` } // Serialize generates the JSON representation for a given Prometheus metric. 
@@ -61,8 +72,7 @@ func Serialize(s Serializer, req *prompb.WriteRequest) (map[string][][]byte, err "name": name, "labels": labels, } - - data, err := s.Marshal(m) + data, err := s.Marshal(m, t) if err != nil { serializeFailed.Add(float64(1)) logrus.WithError(err).Errorln("couldn't marshal timeseries") @@ -79,7 +89,7 @@ func Serialize(s Serializer, req *prompb.WriteRequest) (map[string][][]byte, err type JSONSerializer struct { } -func (s *JSONSerializer) Marshal(metric map[string]interface{}) ([]byte, error) { +func (s *JSONSerializer) Marshal(metric map[string]interface{}, topic string) ([]byte, error) { return json.Marshal(metric) } @@ -92,7 +102,7 @@ type AvroJSONSerializer struct { codec *goavro.Codec } -func (s *AvroJSONSerializer) Marshal(metric map[string]interface{}) ([]byte, error) { +func (s *AvroJSONSerializer) Marshal(metric map[string]interface{}, topic string) ([]byte, error) { return s.codec.TextualFromNative(nil, metric) } @@ -115,6 +125,52 @@ func NewAvroJSONSerializer(schemaPath string) (*AvroJSONSerializer, error) { }, nil } +type SchemaRegistrySerializer struct { + ser *avrov2.Serializer +} + +func (s SchemaRegistrySerializer) Marshal(metric map[string]interface{}, topic string) ([]byte, error) { + m := mapToMetric(metric) + return s.ser.Serialize(topic, &m) +} + +func mapToMetric(metric map[string]interface{}) Metric { + return Metric{ + Timestamp: metric["timestamp"].(string), + Value: metric["value"].(string), + Name: metric["name"].(string), + Labels: metric["labels"].(map[string]string), + } +} + +func NewAvroSchemaRegistrySerializer(schemaRegistryAPIEndpoint string, schemaRegistryAPIKey string, schemaRegistryAPISecret string) (*SchemaRegistrySerializer, error) { + + client, err := schemaregistry.NewClient(schemaregistry.NewConfigWithBasicAuthentication( + schemaRegistryAPIEndpoint, + schemaRegistryAPIKey, + schemaRegistryAPISecret, + )) + + if err != nil { + logrus.WithError(err).Errorln("Failed to create schema registry client.") + return nil, err + } + + config := avrov2.NewSerializerConfig() + config.AutoRegisterSchemas = schemaRegistryAutoRegisterSchemas + + ser, err := avrov2.NewSerializer(client, serde.ValueSerde, config) + + if err != nil { + logrus.WithError(err).Errorln("Failed to create serializer") + return nil, err + } + + return &SchemaRegistrySerializer{ + ser: ser, + }, nil +} + func topic(labels map[string]string) string { var buf bytes.Buffer if err := topicTemplate.Execute(&buf, labels); err != nil { diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go deleted file mode 100644 index e8e5bb41..00000000 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminapi.go +++ /dev/null @@ -1,1614 +0,0 @@ -/** - * Copyright 2018 Confluent Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package kafka - -import ( - "context" - "fmt" - "strings" - "time" - "unsafe" -) - -/* -#include "select_rdkafka.h" -#include - -static const rd_kafka_topic_result_t * -topic_result_by_idx (const rd_kafka_topic_result_t **topics, size_t cnt, size_t idx) { - if (idx >= cnt) - return NULL; - return topics[idx]; -} - -static const rd_kafka_ConfigResource_t * -ConfigResource_by_idx (const rd_kafka_ConfigResource_t **res, size_t cnt, size_t idx) { - if (idx >= cnt) - return NULL; - return res[idx]; -} - -static const rd_kafka_ConfigEntry_t * -ConfigEntry_by_idx (const rd_kafka_ConfigEntry_t **entries, size_t cnt, size_t idx) { - if (idx >= cnt) - return NULL; - return entries[idx]; -} - -static const rd_kafka_acl_result_t * -acl_result_by_idx (const rd_kafka_acl_result_t **acl_results, size_t cnt, size_t idx) { - if (idx >= cnt) - return NULL; - return acl_results[idx]; -} - -static const rd_kafka_DeleteAcls_result_response_t * -DeleteAcls_result_response_by_idx (const rd_kafka_DeleteAcls_result_response_t **delete_acls_result_responses, size_t cnt, size_t idx) { - if (idx >= cnt) - return NULL; - return delete_acls_result_responses[idx]; -} - -static const rd_kafka_AclBinding_t * -AclBinding_by_idx (const rd_kafka_AclBinding_t **acl_bindings, size_t cnt, size_t idx) { - if (idx >= cnt) - return NULL; - return acl_bindings[idx]; -} -*/ -import "C" - -// AdminClient is derived from an existing Producer or Consumer -type AdminClient struct { - handle *handle - isDerived bool // Derived from existing client handle -} - -func durationToMilliseconds(t time.Duration) int { - if t > 0 { - return (int)(t.Seconds() * 1000.0) - } - return (int)(t) -} - -// TopicResult provides per-topic operation result (error) information. -type TopicResult struct { - // Topic name - Topic string - // Error, if any, of result. Check with `Error.Code() != ErrNoError`. - Error Error -} - -// String returns a human-readable representation of a TopicResult. -func (t TopicResult) String() string { - if t.Error.code == 0 { - return t.Topic - } - return fmt.Sprintf("%s (%s)", t.Topic, t.Error.str) -} - -// TopicSpecification holds parameters for creating a new topic. -// TopicSpecification is analogous to NewTopic in the Java Topic Admin API. -type TopicSpecification struct { - // Topic name to create. - Topic string - // Number of partitions in topic. - NumPartitions int - // Default replication factor for the topic's partitions, or zero - // if an explicit ReplicaAssignment is set. - ReplicationFactor int - // (Optional) Explicit replica assignment. The outer array is - // indexed by the partition number, while the inner per-partition array - // contains the replica broker ids. The first broker in each - // broker id list will be the preferred replica. - ReplicaAssignment [][]int32 - // Topic configuration. - Config map[string]string -} - -// PartitionsSpecification holds parameters for creating additional partitions for a topic. -// PartitionsSpecification is analogous to NewPartitions in the Java Topic Admin API. -type PartitionsSpecification struct { - // Topic to create more partitions for. - Topic string - // New partition count for topic, must be higher than current partition count. - IncreaseTo int - // (Optional) Explicit replica assignment. The outer array is - // indexed by the new partition index (i.e., 0 for the first added - // partition), while the inner per-partition array - // contains the replica broker ids. The first broker in each - // broker id list will be the preferred replica. 
- ReplicaAssignment [][]int32 -} - -// ResourceType represents an Apache Kafka resource type -type ResourceType int - -const ( - // ResourceUnknown - Unknown - ResourceUnknown = ResourceType(C.RD_KAFKA_RESOURCE_UNKNOWN) - // ResourceAny - match any resource type (DescribeConfigs) - ResourceAny = ResourceType(C.RD_KAFKA_RESOURCE_ANY) - // ResourceTopic - Topic - ResourceTopic = ResourceType(C.RD_KAFKA_RESOURCE_TOPIC) - // ResourceGroup - Group - ResourceGroup = ResourceType(C.RD_KAFKA_RESOURCE_GROUP) - // ResourceBroker - Broker - ResourceBroker = ResourceType(C.RD_KAFKA_RESOURCE_BROKER) -) - -// String returns the human-readable representation of a ResourceType -func (t ResourceType) String() string { - return C.GoString(C.rd_kafka_ResourceType_name(C.rd_kafka_ResourceType_t(t))) -} - -// ResourceTypeFromString translates a resource type name/string to -// a ResourceType value. -func ResourceTypeFromString(typeString string) (ResourceType, error) { - switch strings.ToUpper(typeString) { - case "ANY": - return ResourceAny, nil - case "TOPIC": - return ResourceTopic, nil - case "GROUP": - return ResourceGroup, nil - case "BROKER": - return ResourceBroker, nil - default: - return ResourceUnknown, NewError(ErrInvalidArg, "Unknown resource type", false) - } -} - -// ConfigSource represents an Apache Kafka config source -type ConfigSource int - -const ( - // ConfigSourceUnknown is the default value - ConfigSourceUnknown = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG) - // ConfigSourceDynamicTopic is dynamic topic config that is configured for a specific topic - ConfigSourceDynamicTopic = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG) - // ConfigSourceDynamicBroker is dynamic broker config that is configured for a specific broker - ConfigSourceDynamicBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG) - // ConfigSourceDynamicDefaultBroker is dynamic broker config that is configured as default for all brokers in the cluster - ConfigSourceDynamicDefaultBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG) - // ConfigSourceStaticBroker is static broker config provided as broker properties at startup (e.g. from server.properties file) - ConfigSourceStaticBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG) - // ConfigSourceDefault is built-in default configuration for configs that have a default value - ConfigSourceDefault = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG) -) - -// String returns the human-readable representation of a ConfigSource type -func (t ConfigSource) String() string { - return C.GoString(C.rd_kafka_ConfigSource_name(C.rd_kafka_ConfigSource_t(t))) -} - -// ConfigResource holds parameters for altering an Apache Kafka configuration resource -type ConfigResource struct { - // Type of resource to set. - Type ResourceType - // Name of resource to set. - Name string - // Config entries to set. - // Configuration updates are atomic, any configuration property not provided - // here will be reverted (by the broker) to its default value. - // Use DescribeConfigs to retrieve the list of current configuration entry values. - Config []ConfigEntry -} - -// String returns a human-readable representation of a ConfigResource -func (c ConfigResource) String() string { - return fmt.Sprintf("Resource(%s, %s)", c.Type, c.Name) -} - -// AlterOperation specifies the operation to perform on the ConfigEntry. -// Currently only AlterOperationSet. 
-type AlterOperation int - -const ( - // AlterOperationSet sets/overwrites the configuration setting. - AlterOperationSet = iota -) - -// String returns the human-readable representation of an AlterOperation -func (o AlterOperation) String() string { - switch o { - case AlterOperationSet: - return "Set" - default: - return fmt.Sprintf("Unknown%d?", int(o)) - } -} - -// ConfigEntry holds parameters for altering a resource's configuration. -type ConfigEntry struct { - // Name of configuration entry, e.g., topic configuration property name. - Name string - // Value of configuration entry. - Value string - // Operation to perform on the entry. - Operation AlterOperation -} - -// StringMapToConfigEntries creates a new map of ConfigEntry objects from the -// provided string map. The AlterOperation is set on each created entry. -func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry { - var ceList []ConfigEntry - - for k, v := range stringMap { - ceList = append(ceList, ConfigEntry{Name: k, Value: v, Operation: operation}) - } - - return ceList -} - -// String returns a human-readable representation of a ConfigEntry. -func (c ConfigEntry) String() string { - return fmt.Sprintf("%v %s=\"%s\"", c.Operation, c.Name, c.Value) -} - -// ConfigEntryResult contains the result of a single configuration entry from a -// DescribeConfigs request. -type ConfigEntryResult struct { - // Name of configuration entry, e.g., topic configuration property name. - Name string - // Value of configuration entry. - Value string - // Source indicates the configuration source. - Source ConfigSource - // IsReadOnly indicates whether the configuration entry can be altered. - IsReadOnly bool - // IsSensitive indicates whether the configuration entry contains sensitive information, in which case the value will be unset. - IsSensitive bool - // IsSynonym indicates whether the configuration entry is a synonym for another configuration property. - IsSynonym bool - // Synonyms contains a map of configuration entries that are synonyms to this configuration entry. - Synonyms map[string]ConfigEntryResult -} - -// String returns a human-readable representation of a ConfigEntryResult. -func (c ConfigEntryResult) String() string { - return fmt.Sprintf("%s=\"%s\"", c.Name, c.Value) -} - -// setFromC sets up a ConfigEntryResult from a C ConfigEntry -func configEntryResultFromC(cEntry *C.rd_kafka_ConfigEntry_t) (entry ConfigEntryResult) { - entry.Name = C.GoString(C.rd_kafka_ConfigEntry_name(cEntry)) - cValue := C.rd_kafka_ConfigEntry_value(cEntry) - if cValue != nil { - entry.Value = C.GoString(cValue) - } - entry.Source = ConfigSource(C.rd_kafka_ConfigEntry_source(cEntry)) - entry.IsReadOnly = cint2bool(C.rd_kafka_ConfigEntry_is_read_only(cEntry)) - entry.IsSensitive = cint2bool(C.rd_kafka_ConfigEntry_is_sensitive(cEntry)) - entry.IsSynonym = cint2bool(C.rd_kafka_ConfigEntry_is_synonym(cEntry)) - - var cSynCnt C.size_t - cSyns := C.rd_kafka_ConfigEntry_synonyms(cEntry, &cSynCnt) - if cSynCnt > 0 { - entry.Synonyms = make(map[string]ConfigEntryResult) - } - - for si := 0; si < int(cSynCnt); si++ { - cSyn := C.ConfigEntry_by_idx(cSyns, cSynCnt, C.size_t(si)) - Syn := configEntryResultFromC(cSyn) - entry.Synonyms[Syn.Name] = Syn - } - - return entry -} - -// ConfigResourceResult provides the result for a resource from a AlterConfigs or -// DescribeConfigs request. -type ConfigResourceResult struct { - // Type of returned result resource. - Type ResourceType - // Name of returned result resource. 
- Name string - // Error, if any, of returned result resource. - Error Error - // Config entries, if any, of returned result resource. - Config map[string]ConfigEntryResult -} - -// String returns a human-readable representation of a ConfigResourceResult. -func (c ConfigResourceResult) String() string { - if c.Error.Code() != 0 { - return fmt.Sprintf("ResourceResult(%s, %s, \"%v\")", c.Type, c.Name, c.Error) - - } - return fmt.Sprintf("ResourceResult(%s, %s, %d config(s))", c.Type, c.Name, len(c.Config)) -} - -// ResourcePatternType enumerates the different types of Kafka resource patterns. -type ResourcePatternType int - -const ( - // ResourcePatternTypeUnknown is a resource pattern type not known or not set. - ResourcePatternTypeUnknown = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) - // ResourcePatternTypeAny matches any resource, used for lookups. - ResourcePatternTypeAny = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_ANY) - // ResourcePatternTypeMatch will perform pattern matching - ResourcePatternTypeMatch = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_MATCH) - // ResourcePatternTypeLiteral matches a literal resource name - ResourcePatternTypeLiteral = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_LITERAL) - // ResourcePatternTypePrefixed matches a prefixed resource name - ResourcePatternTypePrefixed = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED) -) - -// String returns the human-readable representation of a ResourcePatternType -func (t ResourcePatternType) String() string { - return C.GoString(C.rd_kafka_ResourcePatternType_name(C.rd_kafka_ResourcePatternType_t(t))) -} - -// ResourcePatternTypeFromString translates a resource pattern type name to -// a ResourcePatternType value. -func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error) { - switch strings.ToUpper(patternTypeString) { - case "ANY": - return ResourcePatternTypeAny, nil - case "MATCH": - return ResourcePatternTypeMatch, nil - case "LITERAL": - return ResourcePatternTypeLiteral, nil - case "PREFIXED": - return ResourcePatternTypePrefixed, nil - default: - return ResourcePatternTypeUnknown, NewError(ErrInvalidArg, "Unknown resource pattern type", false) - } -} - -// ACLOperation enumerates the different types of ACL operation. 
-type ACLOperation int - -const ( - // ACLOperationUnknown represents an unknown or unset operation - ACLOperationUnknown = ACLOperation(C.RD_KAFKA_ACL_OPERATION_UNKNOWN) - // ACLOperationAny in a filter, matches any ACLOperation - ACLOperationAny = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ANY) - // ACLOperationAll represents all the operations - ACLOperationAll = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALL) - // ACLOperationRead a read operation - ACLOperationRead = ACLOperation(C.RD_KAFKA_ACL_OPERATION_READ) - // ACLOperationWrite represents a write operation - ACLOperationWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_WRITE) - // ACLOperationCreate represents a create operation - ACLOperationCreate = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CREATE) - // ACLOperationDelete represents a delete operation - ACLOperationDelete = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DELETE) - // ACLOperationAlter represents an alter operation - ACLOperationAlter = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER) - // ACLOperationDescribe represents a describe operation - ACLOperationDescribe = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE) - // ACLOperationClusterAction represents a cluster action operation - ACLOperationClusterAction = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION) - // ACLOperationDescribeConfigs represents a describe configs operation - ACLOperationDescribeConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS) - // ACLOperationAlterConfigs represents an alter configs operation - ACLOperationAlterConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS) - // ACLOperationIdempotentWrite represents an idempotent write operation - ACLOperationIdempotentWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE) -) - -// String returns the human-readable representation of an ACLOperation -func (o ACLOperation) String() string { - return C.GoString(C.rd_kafka_AclOperation_name(C.rd_kafka_AclOperation_t(o))) -} - -// ACLOperationFromString translates a ACL operation name to -// a ACLOperation value. -func ACLOperationFromString(aclOperationString string) (ACLOperation, error) { - switch strings.ToUpper(aclOperationString) { - case "ANY": - return ACLOperationAny, nil - case "ALL": - return ACLOperationAll, nil - case "READ": - return ACLOperationRead, nil - case "WRITE": - return ACLOperationWrite, nil - case "CREATE": - return ACLOperationCreate, nil - case "DELETE": - return ACLOperationDelete, nil - case "ALTER": - return ACLOperationAlter, nil - case "DESCRIBE": - return ACLOperationDescribe, nil - case "CLUSTER_ACTION": - return ACLOperationClusterAction, nil - case "DESCRIBE_CONFIGS": - return ACLOperationDescribeConfigs, nil - case "ALTER_CONFIGS": - return ACLOperationAlterConfigs, nil - case "IDEMPOTENT_WRITE": - return ACLOperationIdempotentWrite, nil - default: - return ACLOperationUnknown, NewError(ErrInvalidArg, "Unknown ACL operation", false) - } -} - -// ACLPermissionType enumerates the different types of ACL permission types. 
-type ACLPermissionType int - -const ( - // ACLPermissionTypeUnknown represents an unknown ACLPermissionType - ACLPermissionTypeUnknown = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN) - // ACLPermissionTypeAny in a filter, matches any ACLPermissionType - ACLPermissionTypeAny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY) - // ACLPermissionTypeDeny disallows access - ACLPermissionTypeDeny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY) - // ACLPermissionTypeAllow grants access - ACLPermissionTypeAllow = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW) -) - -// String returns the human-readable representation of an ACLPermissionType -func (o ACLPermissionType) String() string { - return C.GoString(C.rd_kafka_AclPermissionType_name(C.rd_kafka_AclPermissionType_t(o))) -} - -// ACLPermissionTypeFromString translates a ACL permission type name to -// a ACLPermissionType value. -func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error) { - switch strings.ToUpper(aclPermissionTypeString) { - case "ANY": - return ACLPermissionTypeAny, nil - case "DENY": - return ACLPermissionTypeDeny, nil - case "ALLOW": - return ACLPermissionTypeAllow, nil - default: - return ACLPermissionTypeUnknown, NewError(ErrInvalidArg, "Unknown ACL permission type", false) - } -} - -// ACLBinding specifies the operation and permission type for a specific principal -// over one or more resources of the same type. Used by `AdminClient.CreateACLs`, -// returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. -type ACLBinding struct { - Type ResourceType // The resource type. - // The resource name, which depends on the resource type. - // For ResourceBroker the resource name is the broker id. - Name string - ResourcePatternType ResourcePatternType // The resource pattern, relative to the name. - Principal string // The principal this ACLBinding refers to. - Host string // The host that the call is allowed to come from. - Operation ACLOperation // The operation/s specified by this binding. - PermissionType ACLPermissionType // The permission type for the specified operation. -} - -// ACLBindingFilter specifies a filter used to return a list of ACL bindings matching some or all of its attributes. -// Used by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. -type ACLBindingFilter = ACLBinding - -// ACLBindings is a slice of ACLBinding that also implements -// the sort interface -type ACLBindings []ACLBinding - -// ACLBindingFilters is a slice of ACLBindingFilter that also implements -// the sort interface -type ACLBindingFilters []ACLBindingFilter - -func (a ACLBindings) Len() int { - return len(a) -} - -func (a ACLBindings) Less(i, j int) bool { - if a[i].Type != a[j].Type { - return a[i].Type < a[j].Type - } - if a[i].Name != a[j].Name { - return a[i].Name < a[j].Name - } - if a[i].ResourcePatternType != a[j].ResourcePatternType { - return a[i].ResourcePatternType < a[j].ResourcePatternType - } - if a[i].Principal != a[j].Principal { - return a[i].Principal < a[j].Principal - } - if a[i].Host != a[j].Host { - return a[i].Host < a[j].Host - } - if a[i].Operation != a[j].Operation { - return a[i].Operation < a[j].Operation - } - if a[i].PermissionType != a[j].PermissionType { - return a[i].PermissionType < a[j].PermissionType - } - return true -} - -func (a ACLBindings) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -// CreateACLResult provides create ACL error information. -type CreateACLResult struct { - // Error, if any, of result. 
Check with `Error.Code() != ErrNoError`. - Error Error -} - -// DescribeACLsResult provides describe ACLs result or error information. -type DescribeACLsResult struct { - // Slice of ACL bindings matching the provided filter - ACLBindings ACLBindings - // Error, if any, of result. Check with `Error.Code() != ErrNoError`. - Error Error -} - -// DeleteACLsResult provides delete ACLs result or error information. -type DeleteACLsResult = DescribeACLsResult - -// waitResult waits for a result event on cQueue or the ctx to be cancelled, whichever happens -// first. -// The returned result event is checked for errors its error is returned if set. -func (a *AdminClient) waitResult(ctx context.Context, cQueue *C.rd_kafka_queue_t, cEventType C.rd_kafka_event_type_t) (rkev *C.rd_kafka_event_t, err error) { - - resultChan := make(chan *C.rd_kafka_event_t) - closeChan := make(chan bool) // never written to, just closed - - go func() { - for { - select { - case _, ok := <-closeChan: - if !ok { - // Context cancelled/timed out - close(resultChan) - return - } - - default: - // Wait for result event for at most 50ms - // to avoid blocking for too long if - // context is cancelled. - rkev := C.rd_kafka_queue_poll(cQueue, 50) - if rkev != nil { - resultChan <- rkev - close(resultChan) - return - } - } - } - }() - - select { - case rkev = <-resultChan: - // Result type check - if cEventType != C.rd_kafka_event_type(rkev) { - err = newErrorFromString(ErrInvalidType, - fmt.Sprintf("Expected %d result event, not %d", (int)(cEventType), (int)(C.rd_kafka_event_type(rkev)))) - C.rd_kafka_event_destroy(rkev) - return nil, err - } - - // Generic error handling - cErr := C.rd_kafka_event_error(rkev) - if cErr != 0 { - err = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev)) - C.rd_kafka_event_destroy(rkev) - return nil, err - } - close(closeChan) - return rkev, nil - case <-ctx.Done(): - // signal close to go-routine - close(closeChan) - // wait for close from go-routine to make sure it is done - // using cQueue before we return. - rkev, ok := <-resultChan - if ok { - // throw away result since context was cancelled - C.rd_kafka_event_destroy(rkev) - } - return nil, ctx.Err() - } -} - -// cToTopicResults converts a C topic_result_t array to Go TopicResult list. 
-func (a *AdminClient) cToTopicResults(cTopicRes **C.rd_kafka_topic_result_t, cCnt C.size_t) (result []TopicResult, err error) { - - result = make([]TopicResult, int(cCnt)) - - for i := 0; i < int(cCnt); i++ { - cTopic := C.topic_result_by_idx(cTopicRes, cCnt, C.size_t(i)) - result[i].Topic = C.GoString(C.rd_kafka_topic_result_name(cTopic)) - result[i].Error = newErrorFromCString( - C.rd_kafka_topic_result_error(cTopic), - C.rd_kafka_topic_result_error_string(cTopic)) - } - - return result, nil -} - -// cConfigResourceToResult converts a C ConfigResource result array to Go ConfigResourceResult -func (a *AdminClient) cConfigResourceToResult(cRes **C.rd_kafka_ConfigResource_t, cCnt C.size_t) (result []ConfigResourceResult, err error) { - - result = make([]ConfigResourceResult, int(cCnt)) - - for i := 0; i < int(cCnt); i++ { - cRes := C.ConfigResource_by_idx(cRes, cCnt, C.size_t(i)) - result[i].Type = ResourceType(C.rd_kafka_ConfigResource_type(cRes)) - result[i].Name = C.GoString(C.rd_kafka_ConfigResource_name(cRes)) - result[i].Error = newErrorFromCString( - C.rd_kafka_ConfigResource_error(cRes), - C.rd_kafka_ConfigResource_error_string(cRes)) - var cConfigCnt C.size_t - cConfigs := C.rd_kafka_ConfigResource_configs(cRes, &cConfigCnt) - if cConfigCnt > 0 { - result[i].Config = make(map[string]ConfigEntryResult) - } - for ci := 0; ci < int(cConfigCnt); ci++ { - cEntry := C.ConfigEntry_by_idx(cConfigs, cConfigCnt, C.size_t(ci)) - entry := configEntryResultFromC(cEntry) - result[i].Config[entry.Name] = entry - } - } - - return result, nil -} - -// ClusterID returns the cluster ID as reported in broker metadata. -// -// Note on cancellation: Although the underlying C function respects the -// timeout, it currently cannot be manually cancelled. That means manually -// cancelling the context will block until the C function call returns. -// -// Requires broker version >= 0.10.0. -func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error) { - responseChan := make(chan *C.char, 1) - - go func() { - responseChan <- C.rd_kafka_clusterid(a.handle.rk, cTimeoutFromContext(ctx)) - }() - - select { - case <-ctx.Done(): - if cClusterID := <-responseChan; cClusterID != nil { - C.rd_kafka_mem_free(a.handle.rk, unsafe.Pointer(cClusterID)) - } - return "", ctx.Err() - - case cClusterID := <-responseChan: - if cClusterID == nil { // C timeout - <-ctx.Done() - return "", ctx.Err() - } - defer C.rd_kafka_mem_free(a.handle.rk, unsafe.Pointer(cClusterID)) - return C.GoString(cClusterID), nil - } -} - -// ControllerID returns the broker ID of the current controller as reported in -// broker metadata. -// -// Note on cancellation: Although the underlying C function respects the -// timeout, it currently cannot be manually cancelled. That means manually -// cancelling the context will block until the C function call returns. -// -// Requires broker version >= 0.10.0. -func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error) { - responseChan := make(chan int32, 1) - - go func() { - responseChan <- int32(C.rd_kafka_controllerid(a.handle.rk, cTimeoutFromContext(ctx))) - }() - - select { - case <-ctx.Done(): - <-responseChan - return 0, ctx.Err() - - case controllerID := <-responseChan: - if controllerID < 0 { // C timeout - <-ctx.Done() - return 0, ctx.Err() - } - return controllerID, nil - } -} - -// CreateTopics creates topics in cluster. -// -// The list of TopicSpecification objects define the per-topic partition count, replicas, etc. 
-// -// Topic creation is non-atomic and may succeed for some topics but fail for others, -// make sure to check the result for topic-specific errors. -// -// Note: TopicSpecification is analogous to NewTopic in the Java Topic Admin API. -func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error) { - cTopics := make([]*C.rd_kafka_NewTopic_t, len(topics)) - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - // Convert Go TopicSpecifications to C TopicSpecifications - for i, topic := range topics { - - var cReplicationFactor C.int - if topic.ReplicationFactor == 0 { - cReplicationFactor = -1 - } else { - cReplicationFactor = C.int(topic.ReplicationFactor) - } - if topic.ReplicaAssignment != nil { - if cReplicationFactor != -1 { - return nil, newErrorFromString(ErrInvalidArg, - "TopicSpecification.ReplicationFactor and TopicSpecification.ReplicaAssignment are mutually exclusive") - } - - if len(topic.ReplicaAssignment) != topic.NumPartitions { - return nil, newErrorFromString(ErrInvalidArg, - "TopicSpecification.ReplicaAssignment must contain exactly TopicSpecification.NumPartitions partitions") - } - } - - cTopics[i] = C.rd_kafka_NewTopic_new( - C.CString(topic.Topic), - C.int(topic.NumPartitions), - cReplicationFactor, - cErrstr, cErrstrSize) - if cTopics[i] == nil { - return nil, newErrorFromString(ErrInvalidArg, - fmt.Sprintf("Topic %s: %s", topic.Topic, C.GoString(cErrstr))) - } - - defer C.rd_kafka_NewTopic_destroy(cTopics[i]) - - for p, replicas := range topic.ReplicaAssignment { - cReplicas := make([]C.int32_t, len(replicas)) - for ri, replica := range replicas { - cReplicas[ri] = C.int32_t(replica) - } - cErr := C.rd_kafka_NewTopic_set_replica_assignment( - cTopics[i], C.int32_t(p), - (*C.int32_t)(&cReplicas[0]), C.size_t(len(cReplicas)), - cErrstr, cErrstrSize) - if cErr != 0 { - return nil, newCErrorFromString(cErr, - fmt.Sprintf("Failed to set replica assignment for topic %s partition %d: %s", topic.Topic, p, C.GoString(cErrstr))) - } - } - - for key, value := range topic.Config { - cErr := C.rd_kafka_NewTopic_set_config( - cTopics[i], - C.CString(key), C.CString(value)) - if cErr != 0 { - return nil, newCErrorFromString(cErr, - fmt.Sprintf("Failed to set config %s=%s for topic %s", key, value, topic.Topic)) - } - } - } - - // Convert Go AdminOptions (if any) to C AdminOptions - genericOptions := make([]AdminOption, len(options)) - for i := range options { - genericOptions[i] = options[i] - } - cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATETOPICS, genericOptions) - if err != nil { - return nil, err - } - defer C.rd_kafka_AdminOptions_destroy(cOptions) - - // Create temporary queue for async operation - cQueue := C.rd_kafka_queue_new(a.handle.rk) - defer C.rd_kafka_queue_destroy(cQueue) - - // Asynchronous call - C.rd_kafka_CreateTopics( - a.handle.rk, - (**C.rd_kafka_NewTopic_t)(&cTopics[0]), - C.size_t(len(cTopics)), - cOptions, - cQueue) - - // Wait for result, error or context timeout - rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATETOPICS_RESULT) - if err != nil { - return nil, err - } - defer C.rd_kafka_event_destroy(rkev) - - cRes := C.rd_kafka_event_CreateTopics_result(rkev) - - // Convert result from C to Go - var cCnt C.size_t - cTopicRes := C.rd_kafka_CreateTopics_result_topics(cRes, &cCnt) - - return a.cToTopicResults(cTopicRes, cCnt) -} - -// DeleteTopics deletes a 
batch of topics. -// -// This operation is not transactional and may succeed for a subset of topics while -// failing others. -// It may take several seconds after the DeleteTopics result returns success for -// all the brokers to become aware that the topics are gone. During this time, -// topic metadata and configuration may continue to return information about deleted topics. -// -// Requires broker version >= 0.10.1.0 -func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error) { - cTopics := make([]*C.rd_kafka_DeleteTopic_t, len(topics)) - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - // Convert Go DeleteTopics to C DeleteTopics - for i, topic := range topics { - cTopics[i] = C.rd_kafka_DeleteTopic_new(C.CString(topic)) - if cTopics[i] == nil { - return nil, newErrorFromString(ErrInvalidArg, - fmt.Sprintf("Invalid arguments for topic %s", topic)) - } - - defer C.rd_kafka_DeleteTopic_destroy(cTopics[i]) - } - - // Convert Go AdminOptions (if any) to C AdminOptions - genericOptions := make([]AdminOption, len(options)) - for i := range options { - genericOptions[i] = options[i] - } - cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DELETETOPICS, genericOptions) - if err != nil { - return nil, err - } - defer C.rd_kafka_AdminOptions_destroy(cOptions) - - // Create temporary queue for async operation - cQueue := C.rd_kafka_queue_new(a.handle.rk) - defer C.rd_kafka_queue_destroy(cQueue) - - // Asynchronous call - C.rd_kafka_DeleteTopics( - a.handle.rk, - (**C.rd_kafka_DeleteTopic_t)(&cTopics[0]), - C.size_t(len(cTopics)), - cOptions, - cQueue) - - // Wait for result, error or context timeout - rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETETOPICS_RESULT) - if err != nil { - return nil, err - } - defer C.rd_kafka_event_destroy(rkev) - - cRes := C.rd_kafka_event_DeleteTopics_result(rkev) - - // Convert result from C to Go - var cCnt C.size_t - cTopicRes := C.rd_kafka_DeleteTopics_result_topics(cRes, &cCnt) - - return a.cToTopicResults(cTopicRes, cCnt) -} - -// CreatePartitions creates additional partitions for topics. 
-func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error) { - cParts := make([]*C.rd_kafka_NewPartitions_t, len(partitions)) - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - // Convert Go PartitionsSpecification to C NewPartitions - for i, part := range partitions { - cParts[i] = C.rd_kafka_NewPartitions_new(C.CString(part.Topic), C.size_t(part.IncreaseTo), cErrstr, cErrstrSize) - if cParts[i] == nil { - return nil, newErrorFromString(ErrInvalidArg, - fmt.Sprintf("Topic %s: %s", part.Topic, C.GoString(cErrstr))) - } - - defer C.rd_kafka_NewPartitions_destroy(cParts[i]) - - for pidx, replicas := range part.ReplicaAssignment { - cReplicas := make([]C.int32_t, len(replicas)) - for ri, replica := range replicas { - cReplicas[ri] = C.int32_t(replica) - } - cErr := C.rd_kafka_NewPartitions_set_replica_assignment( - cParts[i], C.int32_t(pidx), - (*C.int32_t)(&cReplicas[0]), C.size_t(len(cReplicas)), - cErrstr, cErrstrSize) - if cErr != 0 { - return nil, newCErrorFromString(cErr, - fmt.Sprintf("Failed to set replica assignment for topic %s new partition index %d: %s", part.Topic, pidx, C.GoString(cErrstr))) - } - } - - } - - // Convert Go AdminOptions (if any) to C AdminOptions - genericOptions := make([]AdminOption, len(options)) - for i := range options { - genericOptions[i] = options[i] - } - cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, genericOptions) - if err != nil { - return nil, err - } - defer C.rd_kafka_AdminOptions_destroy(cOptions) - - // Create temporary queue for async operation - cQueue := C.rd_kafka_queue_new(a.handle.rk) - defer C.rd_kafka_queue_destroy(cQueue) - - // Asynchronous call - C.rd_kafka_CreatePartitions( - a.handle.rk, - (**C.rd_kafka_NewPartitions_t)(&cParts[0]), - C.size_t(len(cParts)), - cOptions, - cQueue) - - // Wait for result, error or context timeout - rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT) - if err != nil { - return nil, err - } - defer C.rd_kafka_event_destroy(rkev) - - cRes := C.rd_kafka_event_CreatePartitions_result(rkev) - - // Convert result from C to Go - var cCnt C.size_t - cTopicRes := C.rd_kafka_CreatePartitions_result_topics(cRes, &cCnt) - - return a.cToTopicResults(cTopicRes, cCnt) -} - -// AlterConfigs alters/updates cluster resource configuration. -// -// Updates are not transactional so they may succeed for a subset -// of the provided resources while others fail. -// The configuration for a particular resource is updated atomically, -// replacing values using the provided ConfigEntrys and reverting -// unspecified ConfigEntrys to their default values. -// -// Requires broker version >=0.11.0.0 -// -// AlterConfigs will replace all existing configuration for -// the provided resources with the new configuration given, -// reverting all other configuration to their default values. -// -// Multiple resources and resource types may be set, but at most one -// resource of type ResourceBroker is allowed per call since these -// resource requests must be sent to the broker specified in the resource. 
-func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error) { - cRes := make([]*C.rd_kafka_ConfigResource_t, len(resources)) - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - // Convert Go ConfigResources to C ConfigResources - for i, res := range resources { - cRes[i] = C.rd_kafka_ConfigResource_new( - C.rd_kafka_ResourceType_t(res.Type), C.CString(res.Name)) - if cRes[i] == nil { - return nil, newErrorFromString(ErrInvalidArg, - fmt.Sprintf("Invalid arguments for resource %v", res)) - } - - defer C.rd_kafka_ConfigResource_destroy(cRes[i]) - - for _, entry := range res.Config { - var cErr C.rd_kafka_resp_err_t - switch entry.Operation { - case AlterOperationSet: - cErr = C.rd_kafka_ConfigResource_set_config( - cRes[i], C.CString(entry.Name), C.CString(entry.Value)) - default: - panic(fmt.Sprintf("Invalid ConfigEntry.Operation: %v", entry.Operation)) - } - - if cErr != 0 { - return nil, - newCErrorFromString(cErr, - fmt.Sprintf("Failed to add configuration %s: %s", - entry, C.GoString(C.rd_kafka_err2str(cErr)))) - } - } - } - - // Convert Go AdminOptions (if any) to C AdminOptions - genericOptions := make([]AdminOption, len(options)) - for i := range options { - genericOptions[i] = options[i] - } - cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_ALTERCONFIGS, genericOptions) - if err != nil { - return nil, err - } - defer C.rd_kafka_AdminOptions_destroy(cOptions) - - // Create temporary queue for async operation - cQueue := C.rd_kafka_queue_new(a.handle.rk) - defer C.rd_kafka_queue_destroy(cQueue) - - // Asynchronous call - C.rd_kafka_AlterConfigs( - a.handle.rk, - (**C.rd_kafka_ConfigResource_t)(&cRes[0]), - C.size_t(len(cRes)), - cOptions, - cQueue) - - // Wait for result, error or context timeout - rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_ALTERCONFIGS_RESULT) - if err != nil { - return nil, err - } - defer C.rd_kafka_event_destroy(rkev) - - cResult := C.rd_kafka_event_AlterConfigs_result(rkev) - - // Convert results from C to Go - var cCnt C.size_t - cResults := C.rd_kafka_AlterConfigs_result_resources(cResult, &cCnt) - - return a.cConfigResourceToResult(cResults, cCnt) -} - -// DescribeConfigs retrieves configuration for cluster resources. -// -// The returned configuration includes default values, use -// ConfigEntryResult.IsDefault or ConfigEntryResult.Source to distinguish -// default values from manually configured settings. -// -// The value of config entries where .IsSensitive is true -// will always be nil to avoid disclosing sensitive -// information, such as security settings. -// -// Configuration entries where .IsReadOnly is true can't be modified -// (with AlterConfigs). -// -// Synonym configuration entries are returned if the broker supports -// it (broker version >= 1.1.0). See .Synonyms. -// -// Requires broker version >=0.11.0.0 -// -// Multiple resources and resource types may be requested, but at most -// one resource of type ResourceBroker is allowed per call -// since these resource requests must be sent to the broker specified -// in the resource. 
-func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error) { - cRes := make([]*C.rd_kafka_ConfigResource_t, len(resources)) - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - // Convert Go ConfigResources to C ConfigResources - for i, res := range resources { - cRes[i] = C.rd_kafka_ConfigResource_new( - C.rd_kafka_ResourceType_t(res.Type), C.CString(res.Name)) - if cRes[i] == nil { - return nil, newErrorFromString(ErrInvalidArg, - fmt.Sprintf("Invalid arguments for resource %v", res)) - } - - defer C.rd_kafka_ConfigResource_destroy(cRes[i]) - } - - // Convert Go AdminOptions (if any) to C AdminOptions - genericOptions := make([]AdminOption, len(options)) - for i := range options { - genericOptions[i] = options[i] - } - cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, genericOptions) - if err != nil { - return nil, err - } - defer C.rd_kafka_AdminOptions_destroy(cOptions) - - // Create temporary queue for async operation - cQueue := C.rd_kafka_queue_new(a.handle.rk) - defer C.rd_kafka_queue_destroy(cQueue) - - // Asynchronous call - C.rd_kafka_DescribeConfigs( - a.handle.rk, - (**C.rd_kafka_ConfigResource_t)(&cRes[0]), - C.size_t(len(cRes)), - cOptions, - cQueue) - - // Wait for result, error or context timeout - rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT) - if err != nil { - return nil, err - } - defer C.rd_kafka_event_destroy(rkev) - - cResult := C.rd_kafka_event_DescribeConfigs_result(rkev) - - // Convert results from C to Go - var cCnt C.size_t - cResults := C.rd_kafka_DescribeConfigs_result_resources(cResult, &cCnt) - - return a.cConfigResourceToResult(cResults, cCnt) -} - -// GetMetadata queries broker for cluster and topic metadata. -// If topic is non-nil only information about that topic is returned, else if -// allTopics is false only information about locally used topics is returned, -// else information about all topics is returned. -// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. -func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { - return getMetadata(a, topic, allTopics, timeoutMs) -} - -// String returns a human readable name for an AdminClient instance -func (a *AdminClient) String() string { - return fmt.Sprintf("admin-%s", a.handle.String()) -} - -// get_handle implements the Handle interface -func (a *AdminClient) gethandle() *handle { - return a.handle -} - -// SetOAuthBearerToken sets the the data to be transmitted -// to a broker during SASL/OAUTHBEARER authentication. It will return nil -// on success, otherwise an error if: -// 1) the token data is invalid (meaning an expiration time in the past -// or either a token value or an extension key or value that does not meet -// the regular expression requirements as per -// https://tools.ietf.org/html/rfc7628#section-3.1); -// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; -// 3) SASL/OAUTHBEARER is supported but is not configured as the client's -// authentication mechanism. 
-func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error { - return a.handle.setOAuthBearerToken(oauthBearerToken) -} - -// SetOAuthBearerTokenFailure sets the error message describing why token -// retrieval/setting failed; it also schedules a new token refresh event for 10 -// seconds later so the attempt may be retried. It will return nil on -// success, otherwise an error if: -// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; -// 2) SASL/OAUTHBEARER is supported but is not configured as the client's -// authentication mechanism. -func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error { - return a.handle.setOAuthBearerTokenFailure(errstr) -} - -// aclBindingToC converts a Go ACLBinding struct to a C rd_kafka_AclBinding_t -func (a *AdminClient) aclBindingToC(aclBinding *ACLBinding, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBinding_t, err error) { - var cName, cPrincipal, cHost *C.char - cName, cPrincipal, cHost = nil, nil, nil - if len(aclBinding.Name) > 0 { - cName = C.CString(aclBinding.Name) - defer C.free(unsafe.Pointer(cName)) - } - if len(aclBinding.Principal) > 0 { - cPrincipal = C.CString(aclBinding.Principal) - defer C.free(unsafe.Pointer(cPrincipal)) - } - if len(aclBinding.Host) > 0 { - cHost = C.CString(aclBinding.Host) - defer C.free(unsafe.Pointer(cHost)) - } - - result = C.rd_kafka_AclBinding_new( - C.rd_kafka_ResourceType_t(aclBinding.Type), - cName, - C.rd_kafka_ResourcePatternType_t(aclBinding.ResourcePatternType), - cPrincipal, - cHost, - C.rd_kafka_AclOperation_t(aclBinding.Operation), - C.rd_kafka_AclPermissionType_t(aclBinding.PermissionType), - cErrstr, - cErrstrSize, - ) - if result == nil { - err = newErrorFromString(ErrInvalidArg, - fmt.Sprintf("Invalid arguments for ACL binding %v: %v", aclBinding, C.GoString(cErrstr))) - } - return -} - -// aclBindingFilterToC converts a Go ACLBindingFilter struct to a C rd_kafka_AclBindingFilter_t -func (a *AdminClient) aclBindingFilterToC(aclBindingFilter *ACLBindingFilter, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBindingFilter_t, err error) { - var cName, cPrincipal, cHost *C.char - cName, cPrincipal, cHost = nil, nil, nil - if len(aclBindingFilter.Name) > 0 { - cName = C.CString(aclBindingFilter.Name) - defer C.free(unsafe.Pointer(cName)) - } - if len(aclBindingFilter.Principal) > 0 { - cPrincipal = C.CString(aclBindingFilter.Principal) - defer C.free(unsafe.Pointer(cPrincipal)) - } - if len(aclBindingFilter.Host) > 0 { - cHost = C.CString(aclBindingFilter.Host) - defer C.free(unsafe.Pointer(cHost)) - } - - result = C.rd_kafka_AclBindingFilter_new( - C.rd_kafka_ResourceType_t(aclBindingFilter.Type), - cName, - C.rd_kafka_ResourcePatternType_t(aclBindingFilter.ResourcePatternType), - cPrincipal, - cHost, - C.rd_kafka_AclOperation_t(aclBindingFilter.Operation), - C.rd_kafka_AclPermissionType_t(aclBindingFilter.PermissionType), - cErrstr, - cErrstrSize, - ) - if result == nil { - err = newErrorFromString(ErrInvalidArg, - fmt.Sprintf("Invalid arguments for ACL binding filter %v: %v", aclBindingFilter, C.GoString(cErrstr))) - } - return -} - -// cToACLBinding converts a C rd_kafka_AclBinding_t to Go ACLBinding -func (a *AdminClient) cToACLBinding(cACLBinding *C.rd_kafka_AclBinding_t) ACLBinding { - return ACLBinding{ - ResourceType(C.rd_kafka_AclBinding_restype(cACLBinding)), - C.GoString(C.rd_kafka_AclBinding_name(cACLBinding)), - ResourcePatternType(C.rd_kafka_AclBinding_resource_pattern_type(cACLBinding)), - 
C.GoString(C.rd_kafka_AclBinding_principal(cACLBinding)), - C.GoString(C.rd_kafka_AclBinding_host(cACLBinding)), - ACLOperation(C.rd_kafka_AclBinding_operation(cACLBinding)), - ACLPermissionType(C.rd_kafka_AclBinding_permission_type(cACLBinding)), - } -} - -// cToACLBindings converts a C rd_kafka_AclBinding_t list to Go ACLBindings -func (a *AdminClient) cToACLBindings(cACLBindings **C.rd_kafka_AclBinding_t, aclCnt C.size_t) (result ACLBindings) { - result = make(ACLBindings, aclCnt) - for i := uint(0); i < uint(aclCnt); i++ { - cACLBinding := C.AclBinding_by_idx(cACLBindings, aclCnt, C.size_t(i)) - if cACLBinding == nil { - panic("AclBinding_by_idx must not return nil") - } - result[i] = a.cToACLBinding(cACLBinding) - } - return -} - -// cToCreateACLResults converts a C acl_result_t array to Go CreateACLResult list. -func (a *AdminClient) cToCreateACLResults(cCreateAclsRes **C.rd_kafka_acl_result_t, aclCnt C.size_t) (result []CreateACLResult, err error) { - result = make([]CreateACLResult, uint(aclCnt)) - - for i := uint(0); i < uint(aclCnt); i++ { - cCreateACLRes := C.acl_result_by_idx(cCreateAclsRes, aclCnt, C.size_t(i)) - if cCreateACLRes != nil { - cCreateACLError := C.rd_kafka_acl_result_error(cCreateACLRes) - result[i].Error = newErrorFromCError(cCreateACLError) - } - } - - return result, nil -} - -// cToDescribeACLsResult converts a C rd_kafka_event_t to a Go DescribeAclsResult struct. -func (a *AdminClient) cToDescribeACLsResult(rkev *C.rd_kafka_event_t) (result *DescribeACLsResult) { - result = &DescribeACLsResult{} - err := C.rd_kafka_event_error(rkev) - errCode := ErrorCode(err) - errStr := C.rd_kafka_event_error_string(rkev) - - var cResultACLsCount C.size_t - cResult := C.rd_kafka_event_DescribeAcls_result(rkev) - cResultACLs := C.rd_kafka_DescribeAcls_result_acls(cResult, &cResultACLsCount) - if errCode != ErrNoError { - result.Error = newErrorFromCString(err, errStr) - } - result.ACLBindings = a.cToACLBindings(cResultACLs, cResultACLsCount) - return -} - -// cToDeleteACLsResults converts a C rd_kafka_DeleteAcls_result_response_t array to Go DeleteAclsResult slice. -func (a *AdminClient) cToDeleteACLsResults(cDeleteACLsResResponse **C.rd_kafka_DeleteAcls_result_response_t, resResponseCnt C.size_t) (result []DeleteACLsResult) { - result = make([]DeleteACLsResult, uint(resResponseCnt)) - - for i := uint(0); i < uint(resResponseCnt); i++ { - cDeleteACLsResResponse := C.DeleteAcls_result_response_by_idx(cDeleteACLsResResponse, resResponseCnt, C.size_t(i)) - if cDeleteACLsResResponse == nil { - panic("DeleteAcls_result_response_by_idx must not return nil") - } - - cDeleteACLsError := C.rd_kafka_DeleteAcls_result_response_error(cDeleteACLsResResponse) - result[i].Error = newErrorFromCError(cDeleteACLsError) - - var cMatchingACLsCount C.size_t - cMatchingACLs := C.rd_kafka_DeleteAcls_result_response_matching_acls( - cDeleteACLsResResponse, &cMatchingACLsCount) - - result[i].ACLBindings = a.cToACLBindings(cMatchingACLs, cMatchingACLsCount) - } - return -} - -// CreateACLs creates one or more ACL bindings. -// -// Parameters: -// * `ctx` - context with the maximum amount of time to block, or nil for indefinite. -// * `aclBindings` - A slice of ACL binding specifications to create. 
-// * `options` - Create ACLs options -// -// Returns a slice of CreateACLResult with a ErrNoError ErrorCode when the operation was successful -// plus an error that is not nil for client level errors -func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error) { - if aclBindings == nil { - return nil, newErrorFromString(ErrInvalidArg, - "Expected non-nil slice of ACLBinding structs") - } - if len(aclBindings) == 0 { - return nil, newErrorFromString(ErrInvalidArg, - "Expected non-empty slice of ACLBinding structs") - } - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - cACLBindings := make([]*C.rd_kafka_AclBinding_t, len(aclBindings)) - - for i, aclBinding := range aclBindings { - cACLBindings[i], err = a.aclBindingToC(&aclBinding, cErrstr, cErrstrSize) - if err != nil { - return - } - defer C.rd_kafka_AclBinding_destroy(cACLBindings[i]) - } - - // Convert Go AdminOptions (if any) to C AdminOptions - genericOptions := make([]AdminOption, len(options)) - for i := range options { - genericOptions[i] = options[i] - } - cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATEACLS, genericOptions) - if err != nil { - return nil, err - } - - // Create temporary queue for async operation - cQueue := C.rd_kafka_queue_new(a.handle.rk) - defer C.rd_kafka_queue_destroy(cQueue) - - // Asynchronous call - C.rd_kafka_CreateAcls( - a.handle.rk, - (**C.rd_kafka_AclBinding_t)(&cACLBindings[0]), - C.size_t(len(cACLBindings)), - cOptions, - cQueue) - - // Wait for result, error or context timeout - rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATEACLS_RESULT) - if err != nil { - return nil, err - } - defer C.rd_kafka_event_destroy(rkev) - - var cResultCnt C.size_t - cResult := C.rd_kafka_event_CreateAcls_result(rkev) - aclResults := C.rd_kafka_CreateAcls_result_acls(cResult, &cResultCnt) - result, err = a.cToCreateACLResults(aclResults, cResultCnt) - return -} - -// DescribeACLs matches ACL bindings by filter. -// -// Parameters: -// * `ctx` - context with the maximum amount of time to block, or nil for indefinite. -// * `aclBindingFilter` - A filter with attributes that must match. -// string attributes match exact values or any string if set to empty string. -// Enum attributes match exact values or any value if ending with `Any`. 
-// If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns ACL bindings with: -// - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name -// - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name -// - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name -// * `options` - Describe ACLs options -// -// Returns a slice of ACLBindings when the operation was successful -// plus an error that is not `nil` for client level errors -func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error) { - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - cACLBindingFilter, err := a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize) - if err != nil { - return - } - - // Convert Go AdminOptions (if any) to C AdminOptions - genericOptions := make([]AdminOption, len(options)) - for i := range options { - genericOptions[i] = options[i] - } - cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBEACLS, genericOptions) - if err != nil { - return nil, err - } - // Create temporary queue for async operation - cQueue := C.rd_kafka_queue_new(a.handle.rk) - defer C.rd_kafka_queue_destroy(cQueue) - - // Asynchronous call - C.rd_kafka_DescribeAcls( - a.handle.rk, - cACLBindingFilter, - cOptions, - cQueue) - - // Wait for result, error or context timeout - rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBEACLS_RESULT) - if err != nil { - return nil, err - } - defer C.rd_kafka_event_destroy(rkev) - result = a.cToDescribeACLsResult(rkev) - return -} - -// DeleteACLs deletes ACL bindings matching one or more ACL binding filters. -// -// Parameters: -// * `ctx` - context with the maximum amount of time to block, or nil for indefinite. -// * `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete. -// string attributes match exact values or any string if set to empty string. -// Enum attributes match exact values or any value if ending with `Any`. 
-// If `ResourcePatternType` is set to `ResourcePatternTypeMatch` deletes ACL bindings with: -// - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name -// - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name -// - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name -// * `options` - Delete ACLs options -// -// Returns a slice of ACLBinding for each filter when the operation was successful -// plus an error that is not `nil` for client level errors -func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error) { - if aclBindingFilters == nil { - return nil, newErrorFromString(ErrInvalidArg, - "Expected non-nil slice of ACLBindingFilter structs") - } - if len(aclBindingFilters) == 0 { - return nil, newErrorFromString(ErrInvalidArg, - "Expected non-empty slice of ACLBindingFilter structs") - } - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - cACLBindingFilters := make([]*C.rd_kafka_AclBindingFilter_t, len(aclBindingFilters)) - - for i, aclBindingFilter := range aclBindingFilters { - cACLBindingFilters[i], err = a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize) - if err != nil { - return - } - defer C.rd_kafka_AclBinding_destroy(cACLBindingFilters[i]) - } - - // Convert Go AdminOptions (if any) to C AdminOptions - genericOptions := make([]AdminOption, len(options)) - for i := range options { - genericOptions[i] = options[i] - } - cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DELETEACLS, genericOptions) - if err != nil { - return nil, err - } - // Create temporary queue for async operation - cQueue := C.rd_kafka_queue_new(a.handle.rk) - defer C.rd_kafka_queue_destroy(cQueue) - - // Asynchronous call - C.rd_kafka_DeleteAcls( - a.handle.rk, - (**C.rd_kafka_AclBindingFilter_t)(&cACLBindingFilters[0]), - C.size_t(len(cACLBindingFilters)), - cOptions, - cQueue) - - // Wait for result, error or context timeout - rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETEACLS_RESULT) - if err != nil { - return nil, err - } - defer C.rd_kafka_event_destroy(rkev) - - var cResultResponsesCount C.size_t - cResult := C.rd_kafka_event_DeleteAcls_result(rkev) - cResultResponses := C.rd_kafka_DeleteAcls_result_responses(cResult, &cResultResponsesCount) - result = a.cToDeleteACLsResults(cResultResponses, cResultResponsesCount) - return -} - -// Close an AdminClient instance. -func (a *AdminClient) Close() { - if a.isDerived { - // Derived AdminClient needs no cleanup. - a.handle = &handle{} - return - } - - a.handle.cleanup() - - C.rd_kafka_destroy(a.handle.rk) -} - -// NewAdminClient creats a new AdminClient instance with a new underlying client instance -func NewAdminClient(conf *ConfigMap) (*AdminClient, error) { - - err := versionCheck() - if err != nil { - return nil, err - } - - a := &AdminClient{} - a.handle = &handle{} - - // Convert ConfigMap to librdkafka conf_t - cConf, err := conf.convert() - if err != nil { - return nil, err - } - - cErrstr := (*C.char)(C.malloc(C.size_t(256))) - defer C.free(unsafe.Pointer(cErrstr)) - - C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR|C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH) - - // Create librdkafka producer instance. 
The Producer is somewhat cheaper than - // the consumer, but any instance type can be used for Admin APIs. - a.handle.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256) - if a.handle.rk == nil { - return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr) - } - - a.isDerived = false - a.handle.setup() - - return a, nil -} - -// NewAdminClientFromProducer derives a new AdminClient from an existing Producer instance. -// The AdminClient will use the same configuration and connections as the parent instance. -func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error) { - if p.handle.rk == nil { - return nil, newErrorFromString(ErrInvalidArg, "Can't derive AdminClient from closed producer") - } - - a = &AdminClient{} - a.handle = &p.handle - a.isDerived = true - return a, nil -} - -// NewAdminClientFromConsumer derives a new AdminClient from an existing Consumer instance. -// The AdminClient will use the same configuration and connections as the parent instance. -func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error) { - if c.handle.rk == nil { - return nil, newErrorFromString(ErrInvalidArg, "Can't derive AdminClient from closed consumer") - } - - a = &AdminClient{} - a.handle = &c.handle - a.isDerived = true - return a, nil -} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go deleted file mode 100644 index db55d2dc..00000000 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/adminoptions.go +++ /dev/null @@ -1,297 +0,0 @@ -/** - * Copyright 2018 Confluent Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package kafka - -import ( - "fmt" - "time" - "unsafe" -) - -/* -#include "select_rdkafka.h" -#include -*/ -import "C" - -// AdminOptionOperationTimeout sets the broker's operation timeout, such as the -// timeout for CreateTopics to complete the creation of topics on the controller -// before returning a result to the application. -// -// CreateTopics, DeleteTopics, CreatePartitions: -// a value 0 will return immediately after triggering topic -// creation, while > 0 will wait this long for topic creation to propagate -// in cluster. -// -// Default: 0 (return immediately). -// -// Valid for CreateTopics, DeleteTopics, CreatePartitions. 
-type AdminOptionOperationTimeout struct { - isSet bool - val time.Duration -} - -func (ao AdminOptionOperationTimeout) supportsCreateTopics() { -} -func (ao AdminOptionOperationTimeout) supportsDeleteTopics() { -} -func (ao AdminOptionOperationTimeout) supportsCreatePartitions() { -} - -func (ao AdminOptionOperationTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error { - if !ao.isSet { - return nil - } - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - cErr := C.rd_kafka_AdminOptions_set_operation_timeout( - cOptions, C.int(durationToMilliseconds(ao.val)), - cErrstr, cErrstrSize) - if cErr != 0 { - C.rd_kafka_AdminOptions_destroy(cOptions) - return newCErrorFromString(cErr, - fmt.Sprintf("Failed to set operation timeout: %s", C.GoString(cErrstr))) - - } - - return nil -} - -// SetAdminOperationTimeout sets the broker's operation timeout, such as the -// timeout for CreateTopics to complete the creation of topics on the controller -// before returning a result to the application. -// -// CreateTopics, DeleteTopics, CreatePartitions: -// a value 0 will return immediately after triggering topic -// creation, while > 0 will wait this long for topic creation to propagate -// in cluster. -// -// Default: 0 (return immediately). -// -// Valid for CreateTopics, DeleteTopics, CreatePartitions. -func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout) { - ao.isSet = true - ao.val = t - return ao -} - -// AdminOptionRequestTimeout sets the overall request timeout, including broker -// lookup, request transmission, operation time on broker, and response. -// -// Default: `socket.timeout.ms`. -// -// Valid for all Admin API methods. -type AdminOptionRequestTimeout struct { - isSet bool - val time.Duration -} - -func (ao AdminOptionRequestTimeout) supportsCreateTopics() { -} -func (ao AdminOptionRequestTimeout) supportsDeleteTopics() { -} -func (ao AdminOptionRequestTimeout) supportsCreatePartitions() { -} -func (ao AdminOptionRequestTimeout) supportsAlterConfigs() { -} -func (ao AdminOptionRequestTimeout) supportsDescribeConfigs() { -} - -func (ao AdminOptionRequestTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error { - if !ao.isSet { - return nil - } - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - cErr := C.rd_kafka_AdminOptions_set_request_timeout( - cOptions, C.int(durationToMilliseconds(ao.val)), - cErrstr, cErrstrSize) - if cErr != 0 { - C.rd_kafka_AdminOptions_destroy(cOptions) - return newCErrorFromString(cErr, - fmt.Sprintf("%s", C.GoString(cErrstr))) - - } - - return nil -} - -// SetAdminRequestTimeout sets the overall request timeout, including broker -// lookup, request transmission, operation time on broker, and response. -// -// Default: `socket.timeout.ms`. -// -// Valid for all Admin API methods. -func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout) { - ao.isSet = true - ao.val = t - return ao -} - -// AdminOptionValidateOnly tells the broker to only validate the request, -// without performing the requested operation (create topics, etc). -// -// Default: false. 
-// -// Valid for CreateTopics, CreatePartitions, AlterConfigs -type AdminOptionValidateOnly struct { - isSet bool - val bool -} - -func (ao AdminOptionValidateOnly) supportsCreateTopics() { -} -func (ao AdminOptionValidateOnly) supportsCreatePartitions() { -} -func (ao AdminOptionValidateOnly) supportsAlterConfigs() { -} - -func (ao AdminOptionRequestTimeout) supportsCreateACLs() { -} - -func (ao AdminOptionRequestTimeout) supportsDescribeACLs() { -} - -func (ao AdminOptionRequestTimeout) supportsDeleteACLs() { -} - -func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error { - if !ao.isSet { - return nil - } - - cErrstrSize := C.size_t(512) - cErrstr := (*C.char)(C.malloc(cErrstrSize)) - defer C.free(unsafe.Pointer(cErrstr)) - - cErr := C.rd_kafka_AdminOptions_set_validate_only( - cOptions, bool2cint(ao.val), - cErrstr, cErrstrSize) - if cErr != 0 { - C.rd_kafka_AdminOptions_destroy(cOptions) - return newCErrorFromString(cErr, - fmt.Sprintf("%s", C.GoString(cErrstr))) - - } - - return nil -} - -// SetAdminValidateOnly tells the broker to only validate the request, -// without performing the requested operation (create topics, etc). -// -// Default: false. -// -// Valid for CreateTopics, DeleteTopics, CreatePartitions, AlterConfigs -func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly) { - ao.isSet = true - ao.val = validateOnly - return ao -} - -// CreateTopicsAdminOption - see setters. -// -// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly. -type CreateTopicsAdminOption interface { - supportsCreateTopics() - apply(cOptions *C.rd_kafka_AdminOptions_t) error -} - -// DeleteTopicsAdminOption - see setters. -// -// See SetAdminRequestTimeout, SetAdminOperationTimeout. -type DeleteTopicsAdminOption interface { - supportsDeleteTopics() - apply(cOptions *C.rd_kafka_AdminOptions_t) error -} - -// CreatePartitionsAdminOption - see setters. -// -// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly. -type CreatePartitionsAdminOption interface { - supportsCreatePartitions() - apply(cOptions *C.rd_kafka_AdminOptions_t) error -} - -// AlterConfigsAdminOption - see setters. -// -// See SetAdminRequestTimeout, SetAdminValidateOnly, SetAdminIncremental. -type AlterConfigsAdminOption interface { - supportsAlterConfigs() - apply(cOptions *C.rd_kafka_AdminOptions_t) error -} - -// DescribeConfigsAdminOption - see setters. -// -// See SetAdminRequestTimeout. -type DescribeConfigsAdminOption interface { - supportsDescribeConfigs() - apply(cOptions *C.rd_kafka_AdminOptions_t) error -} - -// CreateACLsAdminOption - see setter. -// -// See SetAdminRequestTimeout -type CreateACLsAdminOption interface { - supportsCreateACLs() - apply(cOptions *C.rd_kafka_AdminOptions_t) error -} - -// DescribeACLsAdminOption - see setter. -// -// See SetAdminRequestTimeout -type DescribeACLsAdminOption interface { - supportsDescribeACLs() - apply(cOptions *C.rd_kafka_AdminOptions_t) error -} - -// DeleteACLsAdminOption - see setter. -// -// See SetAdminRequestTimeout -type DeleteACLsAdminOption interface { - supportsDeleteACLs() - apply(cOptions *C.rd_kafka_AdminOptions_t) error -} - -// AdminOption is a generic type not to be used directly. -// -// See CreateTopicsAdminOption et.al. 
-type AdminOption interface {
-	apply(cOptions *C.rd_kafka_AdminOptions_t) error
-}
-
-func adminOptionsSetup(h *handle, opType C.rd_kafka_admin_op_t, options []AdminOption) (*C.rd_kafka_AdminOptions_t, error) {
-
-	cOptions := C.rd_kafka_AdminOptions_new(h.rk, opType)
-	for _, opt := range options {
-		if opt == nil {
-			continue
-		}
-		err := opt.apply(cOptions)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return cOptions, nil
-}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html
deleted file mode 100644
index d38ca2e0..00000000
--- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/api.html
+++ /dev/null
@@ -1,5433 +0,0 @@
[api.html was a generated "kafka - Go Documentation Server" page for package
github.com/confluentinc/confluent-kafka-go/kafka. Only the overview text of the
deleted page is reproduced below; the HTML markup, page chrome, API index and
package-file navigation are omitted.]

Overview

Package kafka provides high-level Apache Kafka producers and consumers using
bindings on top of the librdkafka C library.
- High-level Consumer -

-

- * Decide if you want to read messages and events by calling `.Poll()` or -the deprecated option of using the `.Events()` channel. (If you want to use -`.Events()` channel then set `"go.events.channel.enable": true`). -

-

- * Create a Consumer with `kafka.NewConsumer()` providing at -least the `bootstrap.servers` and `group.id` configuration properties. -

-

- * Call `.Subscribe()` or (`.SubscribeTopics()` to subscribe to multiple topics) -to join the group with the specified subscription set. -Subscriptions are atomic, calling `.Subscribe*()` again will leave -the group and rejoin with the new set of topics. -

-

- * Start reading events and messages from either the `.Events` channel -or by calling `.Poll()`. -

-

- * When the group has rebalanced each client member is assigned a -(sub-)set of topic+partitions. -By default the consumer will start fetching messages for its assigned -partitions at this point, but your application may enable rebalance -events to get an insight into what the assigned partitions where -as well as set the initial offsets. To do this you need to pass -`"go.application.rebalance.enable": true` to the `NewConsumer()` call -mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event -with the assigned partition set. You can optionally modify the initial -offsets (they'll default to stored offsets and if there are no previously stored -offsets it will fall back to `"auto.offset.reset"` -which defaults to the `latest` message) and then call `.Assign(partitions)` -to start consuming. If you don't need to modify the initial offsets you will -not need to call `.Assign()`, the client will do so automatically for you if -you dont, unless you are using the channel-based consumer in which case -you MUST call `.Assign()` when receiving the `AssignedPartitions` and -`RevokedPartitions` events. -

-

- * As messages are fetched they will be made available on either the -`.Events` channel or by calling `.Poll()`, look for event type `*kafka.Message`. -

-

- * Handle messages, events and errors to your liking. -

-

- * When you are done consuming call `.Close()` to commit final offsets -and leave the consumer group. -

-

- Producer -

-

- * Create a Producer with `kafka.NewProducer()` providing at least -the `bootstrap.servers` configuration properties. -

-

- * Messages may now be produced either by sending a `*kafka.Message` -on the `.ProduceChannel` or by calling `.Produce()`. -

-

- * Producing is an asynchronous operation so the client notifies the application -of per-message produce success or failure through something called delivery reports. -Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message` -and you should check `msg.TopicPartition.Error` for `nil` to find out if the message -was succesfully delivered or not. -It is also possible to direct delivery reports to alternate channels -by providing a non-nil `chan Event` channel to `.Produce()`. -If no delivery reports are wanted they can be completely disabled by -setting configuration property `"go.delivery.reports": false`. -

-

- * When you are done producing messages you will need to make sure all messages -are indeed delivered to the broker (or failed), remember that this is -an asynchronous client so some of your messages may be lingering in internal -channels or tranmission queues. -To do this you can either keep track of the messages you've produced -and wait for their corresponding delivery reports, or call the convenience -function `.Flush()` that will block until all message deliveries are done -or the provided timeout elapses. -

-

- * Finally call `.Close()` to decommission the producer. -

-

- Transactional producer API -

-

- The transactional producer operates on top of the idempotent producer, -and provides full exactly-once semantics (EOS) for Apache Kafka when used -with the transaction aware consumer (`isolation.level=read_committed`). -

-

- A producer instance is configured for transactions by setting the -`transactional.id` to an identifier unique for the application. This -id will be used to fence stale transactions from previous instances of -the application, typically following an outage or crash. -

-

- After creating the transactional producer instance using `NewProducer()` -the transactional state must be initialized by calling -`InitTransactions()`. This is a blocking call that will -acquire a runtime producer id from the transaction coordinator broker -as well as abort any stale transactions and fence any still running producer -instances with the same `transactional.id`. -

-

- Once transactions are initialized the application may begin a new -transaction by calling `BeginTransaction()`. -A producer instance may only have one single on-going transaction. -

-

- Any messages produced after the transaction has been started will -belong to the ongoing transaction and will be committed or aborted -atomically. -It is not permitted to produce messages outside a transaction -boundary, e.g., before `BeginTransaction()` or after `CommitTransaction()`, -`AbortTransaction()` or if the current transaction has failed. -

-

- If consumed messages are used as input to the transaction, the consumer -instance must be configured with `enable.auto.commit` set to `false`. -To commit the consumed offsets along with the transaction pass the -list of consumed partitions and the last offset processed + 1 to -`SendOffsetsToTransaction()` prior to committing the transaction. -This allows an aborted transaction to be restarted using the previously -committed offsets. -

-

- To commit the produced messages, and any consumed offsets, to the -current transaction, call `CommitTransaction()`. -This call will block until the transaction has been fully committed or -failed (typically due to fencing by a newer producer instance). -

-

- Alternatively, if processing fails, or an abortable transaction error is -raised, the transaction needs to be aborted by calling -`AbortTransaction()` which marks any produced messages and -offset commits as aborted. -

-

- After the current transaction has been committed or aborted a new -transaction may be started by calling `BeginTransaction()` again. -

-

- Retriable errors: -Some error cases allow the attempted operation to be retried, this is -indicated by the error object having the retriable flag set which can -be detected by calling `err.(kafka.Error).IsRetriable()`. -When this flag is set the application may retry the operation immediately -or preferably after a shorter grace period (to avoid busy-looping). -Retriable errors include timeouts, broker transport failures, etc. -

-

- Abortable errors: -An ongoing transaction may fail permanently due to various errors, -such as transaction coordinator becoming unavailable, write failures to the -Apache Kafka log, under-replicated partitions, etc. -At this point the producer application must abort the current transaction -using `AbortTransaction()` and optionally start a new transaction -by calling `BeginTransaction()`. -Whether an error is abortable or not is detected by calling -`err.(kafka.Error).TxnRequiresAbort()` on the returned error object. -

-

- Fatal errors: -While the underlying idempotent producer will typically only raise -fatal errors for unrecoverable cluster errors where the idempotency -guarantees can't be maintained, most of these are treated as abortable by -the transactional producer since transactions may be aborted and retried -in their entirety; -The transactional producer on the other hand introduces a set of additional -fatal errors which the application needs to handle by shutting down the -producer and terminate. There is no way for a producer instance to recover -from fatal errors. -Whether an error is fatal or not is detected by calling -`err.(kafka.Error).IsFatal()` on the returned error object or by checking -the global `GetFatalError()`. -

-

- Handling of other errors: -For errors that have neither retriable, abortable or the fatal flag set -it is not always obvious how to handle them. While some of these errors -may be indicative of bugs in the application code, such as when -an invalid parameter is passed to a method, other errors might originate -from the broker and be passed thru as-is to the application. -The general recommendation is to treat these errors, that have -neither the retriable or abortable flags set, as fatal. -

-

- Error handling example: -

-
retry:
-
-   err := producer.CommitTransaction(...)
-   if err == nil {
-       return nil
-   } else if err.(kafka.Error).TxnRequiresAbort() {
-       do_abort_transaction_and_reset_inputs()
-   } else if err.(kafka.Error).IsRetriable() {
-       goto retry
-   } else { // treat all other errors as fatal errors
-       panic(err)
-   }
-
-

- Events -

-

- Apart from emitting messages and delivery reports the client also communicates -with the application through a number of different event types. -An application may choose to handle or ignore these events. -

-

- Consumer events -

-

- * `*kafka.Message` - a fetched message. -

-

- * `AssignedPartitions` - The assigned partition set for this client following a rebalance. -Requires `go.application.rebalance.enable` -

-

- * `RevokedPartitions` - The counter part to `AssignedPartitions` following a rebalance. -`AssignedPartitions` and `RevokedPartitions` are symmetrical. -Requires `go.application.rebalance.enable` -

-

- * `PartitionEOF` - Consumer has reached the end of a partition. -NOTE: The consumer will keep trying to fetch new messages for the partition. -

-

- * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled). -

-

- Producer events -

-

- * `*kafka.Message` - delivery report for produced message. -Check `.TopicPartition.Error` for delivery result. -

-

- Generic events for both Consumer and Producer -

-

- * `KafkaError` - client (error codes are prefixed with _) or broker error. -These errors are normally just informational since the -client will try its best to automatically recover (eventually). -

-

- * `OAuthBearerTokenRefresh` - retrieval of a new SASL/OAUTHBEARER token is required. -This event only occurs with sasl.mechanism=OAUTHBEARER. -Be sure to invoke SetOAuthBearerToken() on the Producer/Consumer/AdminClient -instance when a successful token retrieval is completed, otherwise be sure to -invoke SetOAuthBearerTokenFailure() to indicate that retrieval failed (or -if setting the token failed, which could happen if an extension doesn't meet -the required regular expression); invoking SetOAuthBearerTokenFailure() will -schedule a new event for 10 seconds later so another retrieval can be attempted. -

-

- Hint: If your application registers a signal notification -(signal.Notify) makes sure the signals channel is buffered to avoid -possible complications with blocking Poll() calls. -

-

- Note: The Confluent Kafka Go client is safe for concurrent use. -

-
-
-
- -
-

- Index ▾ -

- -
-
-
- - Constants - -
-
- - func LibraryVersion() (int, string) - -
-
- - func WriteErrorCodes(f *os.File) - -
-
- - type ACLBinding - -
-
- - type ACLBindingFilter - -
-
- - type ACLBindingFilters - -
-
- - type ACLBindings - -
-
- - func (a ACLBindings) Len() int - -
-
- - func (a ACLBindings) Less(i, j int) bool - -
-
- - func (a ACLBindings) Swap(i, j int) - -
-
- - type ACLOperation - -
-
- - func ACLOperationFromString(aclOperationString string) (ACLOperation, error) - -
-
- - func (o ACLOperation) String() string - -
-
- - type ACLPermissionType - -
-
- - func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error) - -
-
- - func (o ACLPermissionType) String() string - -
-
- - type AdminClient - -
-
- - func NewAdminClient(conf *ConfigMap) (*AdminClient, error) - -
-
- - func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error) - -
-
- - func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error) - -
-
- - func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error) - -
-
- - func (a *AdminClient) Close() - -
-
- - func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error) - -
-
- - func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error) - -
-
- - func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error) - -
-
- - func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error) - -
-
- - func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error) - -
-
- - func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error) - -
-
- - func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error) - -
-
- - func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error) - -
-
- - func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error) - -
-
- - func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) - -
-
- - func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error - -
-
- - func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error - -
-
- - func (a *AdminClient) String() string - -
-
- - type AdminOption - -
-
- - type AdminOptionOperationTimeout - -
-
- - func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout) - -
-
- - type AdminOptionRequestTimeout - -
-
- - func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout) - -
-
- - type AdminOptionValidateOnly - -
-
- - func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly) - -
-
- - type AlterConfigsAdminOption - -
-
- - type AlterOperation - -
-
- - func (o AlterOperation) String() string - -
-
- - type AssignedPartitions - -
-
- - func (e AssignedPartitions) String() string - -
-
- - type BrokerMetadata - -
-
- - type ConfigEntry - -
-
- - func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry - -
-
- - func (c ConfigEntry) String() string - -
-
- - type ConfigEntryResult - -
-
- - func (c ConfigEntryResult) String() string - -
-
- - type ConfigMap - -
-
- - func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error) - -
-
- - func (m ConfigMap) Set(kv string) error - -
-
- - func (m ConfigMap) SetKey(key string, value ConfigValue) error - -
-
- - type ConfigResource - -
-
- - func (c ConfigResource) String() string - -
-
- - type ConfigResourceResult - -
-
- - func (c ConfigResourceResult) String() string - -
-
- - type ConfigSource - -
-
- - func (t ConfigSource) String() string - -
-
- - type ConfigValue - -
-
- - type Consumer - -
-
- - func NewConsumer(conf *ConfigMap) (*Consumer, error) - -
-
- - func (c *Consumer) Assign(partitions []TopicPartition) (err error) - -
-
- - func (c *Consumer) Assignment() (partitions []TopicPartition, err error) - -
-
- - func (c *Consumer) AssignmentLost() bool - -
-
- - func (c *Consumer) Close() (err error) - -
-
- - func (c *Consumer) Commit() ([]TopicPartition, error) - -
-
- - func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error) - -
-
- - func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error) - -
-
- - func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) - -
-
- - func (c *Consumer) Events() chan Event - -
-
- - func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error) - -
-
- - func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) - -
-
- - func (c *Consumer) GetRebalanceProtocol() string - -
-
- - func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error) - -
-
- - func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error) - -
-
- - func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error) - -
-
- - func (c *Consumer) Logs() chan LogEvent - -
-
- - func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) - -
-
- - func (c *Consumer) Pause(partitions []TopicPartition) (err error) - -
-
- - func (c *Consumer) Poll(timeoutMs int) (event Event) - -
-
- - func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error) - -
-
- - func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) - -
-
- - func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error) - -
-
- - func (c *Consumer) Resume(partitions []TopicPartition) (err error) - -
-
- - func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error - -
-
- - func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error - -
-
- - func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error - -
-
- - func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error) - -
-
- - func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error) - -
-
- - func (c *Consumer) String() string - -
-
- - func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error - -
-
- - func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error) - -
-
- - func (c *Consumer) Subscription() (topics []string, err error) - -
-
- - func (c *Consumer) Unassign() (err error) - -
-
- - func (c *Consumer) Unsubscribe() (err error) - -
-
- - type ConsumerGroupMetadata - -
-
- - func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error) - -
-
- - type CreateACLResult - -
-
- - type CreateACLsAdminOption - -
-
- - type CreatePartitionsAdminOption - -
-
- - type CreateTopicsAdminOption - -
-
- - type DeleteACLsAdminOption - -
-
- - type DeleteACLsResult - -
-
- - type DeleteTopicsAdminOption - -
-
- - type DescribeACLsAdminOption - -
-
- - type DescribeACLsResult - -
-
- - type DescribeConfigsAdminOption - -
-
- - type Error - -
-
- - func NewError(code ErrorCode, str string, fatal bool) (err Error) - -
-
- - func (e Error) Code() ErrorCode - -
-
- - func (e Error) Error() string - -
-
- - func (e Error) IsFatal() bool - -
-
- - func (e Error) IsRetriable() bool - -
-
- - func (e Error) String() string - -
-
- - func (e Error) TxnRequiresAbort() bool - -
-
- - type ErrorCode - -
-
- - func (c ErrorCode) String() string - -
-
- - type Event - -
-
- - type Handle - -
-
- - type Header - -
-
- - func (h Header) String() string - -
-
- - type LogEvent - -
-
- - func (logEvent LogEvent) String() string - -
-
- - type Message - -
-
- - func (m *Message) String() string - -
-
- - type Metadata - -
-
- - type MockCluster - -
-
- - func NewMockCluster(brokerCount int) (*MockCluster, error) - -
-
- - func (mc *MockCluster) BootstrapServers() string - -
-
- - func (mc *MockCluster) Close() - -
-
- - type OAuthBearerToken - -
-
- - type OAuthBearerTokenRefresh - -
-
- - func (o OAuthBearerTokenRefresh) String() string - -
-
- - type Offset - -
-
- - func NewOffset(offset interface{}) (Offset, error) - -
-
- - func OffsetTail(relativeOffset Offset) Offset - -
-
- - func (o *Offset) Set(offset interface{}) error - -
-
- - func (o Offset) String() string - -
-
- - type OffsetsCommitted - -
-
- - func (o OffsetsCommitted) String() string - -
-
- - type PartitionEOF - -
-
- - func (p PartitionEOF) String() string - -
-
- - type PartitionMetadata - -
-
- - type PartitionsSpecification - -
-
- - type Producer - -
-
- - func NewProducer(conf *ConfigMap) (*Producer, error) - -
-
- - func (p *Producer) AbortTransaction(ctx context.Context) error - -
-
- - func (p *Producer) BeginTransaction() error - -
-
- - func (p *Producer) Close() - -
-
- - func (p *Producer) CommitTransaction(ctx context.Context) error - -
-
- - func (p *Producer) Events() chan Event - -
-
- - func (p *Producer) Flush(timeoutMs int) int - -
-
- - func (p *Producer) GetFatalError() error - -
-
- - func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) - -
-
- - func (p *Producer) InitTransactions(ctx context.Context) error - -
-
- - func (p *Producer) Len() int - -
-
- - func (p *Producer) Logs() chan LogEvent - -
-
- - func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) - -
-
- - func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error - -
-
- - func (p *Producer) ProduceChannel() chan *Message - -
-
- - func (p *Producer) Purge(flags int) error - -
-
- - func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) - -
-
- - func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error - -
-
- - func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error - -
-
- - func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error - -
-
- - func (p *Producer) String() string - -
-
- - func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode - -
-
- - type RebalanceCb - -
-
- - type ResourcePatternType - -
-
- - func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error) - -
-
- - func (t ResourcePatternType) String() string - -
-
- - type ResourceType - -
-
- - func ResourceTypeFromString(typeString string) (ResourceType, error) - -
-
- - func (t ResourceType) String() string - -
-
- - type RevokedPartitions - -
-
- - func (e RevokedPartitions) String() string - -
-
- - type Stats - -
-
- - func (e Stats) String() string - -
-
- - type TimestampType - -
-
- - func (t TimestampType) String() string - -
-
- - type TopicMetadata - -
-
- - type TopicPartition - -
-
- - func (p TopicPartition) String() string - -
-
- - type TopicPartitions - -
-
- - func (tps TopicPartitions) Len() int - -
-
- - func (tps TopicPartitions) Less(i, j int) bool - -
-
- - func (tps TopicPartitions) Swap(i, j int) - -
-
- - type TopicResult - -
-
- - func (t TopicResult) String() string - -
-
- - type TopicSpecification - -
-
-
- -

- Package files -

-

- 00version.go, adminapi.go, adminoptions.go, build_glibc_linux.go, config.go, consumer.go, context.go, error.go, error_gen.go, event.go, generated_errors.go, handle.go, header.go, kafka.go, log.go, message.go, metadata.go, misc.go, mockcluster.go, offset.go, producer.go, testhelpers.go, time.go -

-
- -
- -

- Constants -

-
const (
-    // ResourceUnknown - Unknown
-    ResourceUnknown = ResourceType(C.RD_KAFKA_RESOURCE_UNKNOWN)
-    // ResourceAny - match any resource type (DescribeConfigs)
-    ResourceAny = ResourceType(C.RD_KAFKA_RESOURCE_ANY)
-    // ResourceTopic - Topic
-    ResourceTopic = ResourceType(C.RD_KAFKA_RESOURCE_TOPIC)
-    // ResourceGroup - Group
-    ResourceGroup = ResourceType(C.RD_KAFKA_RESOURCE_GROUP)
-    // ResourceBroker - Broker
-    ResourceBroker = ResourceType(C.RD_KAFKA_RESOURCE_BROKER)
-)
-
const (
-    // ConfigSourceUnknown is the default value
-    ConfigSourceUnknown = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG)
-    // ConfigSourceDynamicTopic is dynamic topic config that is configured for a specific topic
-    ConfigSourceDynamicTopic = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG)
-    // ConfigSourceDynamicBroker is dynamic broker config that is configured for a specific broker
-    ConfigSourceDynamicBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG)
-    // ConfigSourceDynamicDefaultBroker is dynamic broker config that is configured as default for all brokers in the cluster
-    ConfigSourceDynamicDefaultBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG)
-    // ConfigSourceStaticBroker is static broker config provided as broker properties at startup (e.g. from server.properties file)
-    ConfigSourceStaticBroker = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG)
-    // ConfigSourceDefault is built-in default configuration for configs that have a default value
-    ConfigSourceDefault = ConfigSource(C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG)
-)
-
const (
-    // ResourcePatternTypeUnknown is a resource pattern type not known or not set.
-    ResourcePatternTypeUnknown = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN)
-    // ResourcePatternTypeAny matches any resource, used for lookups.
-    ResourcePatternTypeAny = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_ANY)
-    // ResourcePatternTypeMatch will perform pattern matching
-    ResourcePatternTypeMatch = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_MATCH)
-    // ResourcePatternTypeLiteral matches a literal resource name
-    ResourcePatternTypeLiteral = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_LITERAL)
-    // ResourcePatternTypePrefixed matches a prefixed resource name
-    ResourcePatternTypePrefixed = ResourcePatternType(C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED)
-)
-
const (
-    // ACLOperationUnknown represents an unknown or unset operation
-    ACLOperationUnknown = ACLOperation(C.RD_KAFKA_ACL_OPERATION_UNKNOWN)
-    // ACLOperationAny in a filter, matches any ACLOperation
-    ACLOperationAny = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ANY)
-    // ACLOperationAll represents all the operations
-    ACLOperationAll = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALL)
-    // ACLOperationRead a read operation
-    ACLOperationRead = ACLOperation(C.RD_KAFKA_ACL_OPERATION_READ)
-    // ACLOperationWrite represents a write operation
-    ACLOperationWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_WRITE)
-    // ACLOperationCreate represents a create operation
-    ACLOperationCreate = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CREATE)
-    // ACLOperationDelete represents a delete operation
-    ACLOperationDelete = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DELETE)
-    // ACLOperationAlter represents an alter operation
-    ACLOperationAlter = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER)
-    // ACLOperationDescribe represents a describe operation
-    ACLOperationDescribe = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE)
-    // ACLOperationClusterAction represents a cluster action operation
-    ACLOperationClusterAction = ACLOperation(C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION)
-    // ACLOperationDescribeConfigs represents a describe configs operation
-    ACLOperationDescribeConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS)
-    // ACLOperationAlterConfigs represents an alter configs operation
-    ACLOperationAlterConfigs = ACLOperation(C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS)
-    // ACLOperationIdempotentWrite represents an idempotent write operation
-    ACLOperationIdempotentWrite = ACLOperation(C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE)
-)
-
const (
-    // ACLPermissionTypeUnknown represents an unknown ACLPermissionType
-    ACLPermissionTypeUnknown = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN)
-    // ACLPermissionTypeAny in a filter, matches any ACLPermissionType
-    ACLPermissionTypeAny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY)
-    // ACLPermissionTypeDeny disallows access
-    ACLPermissionTypeDeny = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY)
-    // ACLPermissionTypeAllow grants access
-    ACLPermissionTypeAllow = ACLPermissionType(C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW)
-)
-
const (
-    // TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
-    TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
-    // TimestampCreateTime indicates timestamp set by producer (source time)
-    TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
-    // TimestampLogAppendTime indicates timestamp set by broker (store time)
-    TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
-)
-
const (
-    // PurgeInFlight purges messages in-flight to or from the broker.
-    // Purging these messages will void any future acknowledgements from the
-    // broker, making it impossible for the application to know if these
-    // messages were successfully delivered or not.
-    // Retrying these messages may lead to duplicates.
-    PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT)
-
-    // PurgeQueue Purge messages in internal queues.
-    PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE)
-
-    // PurgeNonBlocking Don't wait for background thread queue purging to finish.
-    PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING)
-)
-
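The purge flags are plain int bit flags, so they can be OR'ed together. As a minimal, non-authoritative sketch of a hard producer shutdown (assuming the v1 import path github.com/confluentinc/confluent-kafka-go/kafka plus fmt; the 1-second flush is arbitrary):

    func hardShutdown(p *kafka.Producer) {
        // Drop queued and in-flight messages; per the note above, delivery
        // of purged in-flight messages becomes unknown to the application.
        if err := p.Purge(kafka.PurgeQueue | kafka.PurgeInFlight); err != nil {
            fmt.Printf("purge: %v\n", err)
        }
        p.Flush(1000) // serve the delivery reports raised for purged messages
        p.Close()
    }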
const (
-    // AlterOperationSet sets/overwrites the configuration setting.
-    AlterOperationSet = iota
-)
-

- LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -

-
const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.9.2.tgz"
-

- OffsetBeginning represents the earliest offset (logical) -

-
const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)
-

- OffsetEnd represents the latest offset (logical) -

-
const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)
-

- OffsetInvalid represents an invalid/unspecified offset -

-
const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)
-

- OffsetStored represents a stored offset -

-
const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)
-

- PartitionAny represents any partition (for partitioning), or an unspecified value (for all other cases) -

-
const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)
-

- func - - LibraryVersion - - -

-
func LibraryVersion() (int, string)
-

- LibraryVersion returns the underlying librdkafka library version as a (version_int, version_str) tuple. -

-
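A quick sketch of logging the linked librdkafka version at startup (assuming the import path github.com/confluentinc/confluent-kafka-go/kafka and fmt):

    func printLibraryVersion() {
        verInt, verStr := kafka.LibraryVersion()
        fmt.Printf("librdkafka %s (0x%06x)\n", verStr, verInt)
    }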

- func - - WriteErrorCodes - - -

-
func WriteErrorCodes(f *os.File)
-

- WriteErrorCodes writes Go error code constants to file from the -librdkafka error codes. -This function is not intended for public use. -

-

- type - - ACLBinding - - -

-

- ACLBinding specifies the operation and permission type for a specific principal -over one or more resources of the same type. Used by `AdminClient.CreateACLs`, -returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. -

-
type ACLBinding struct {
-    Type ResourceType // The resource type.
-    // The resource name, which depends on the resource type.
-    // For ResourceBroker the resource name is the broker id.
-    Name                string
-    ResourcePatternType ResourcePatternType // The resource pattern, relative to the name.
-    Principal           string              // The principal this ACLBinding refers to.
-    Host                string              // The host that the call is allowed to come from.
-    Operation           ACLOperation        // The operation/s specified by this binding.
-    PermissionType      ACLPermissionType   // The permission type for the specified operation.
-}
-
-

- type - - ACLBindingFilter - - -

-

- ACLBindingFilter specifies a filter used to return a list of ACL bindings matching some or all of its attributes. -Used by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`. -

-
type ACLBindingFilter = ACLBinding
-

- type - - ACLBindingFilters - - -

-

- ACLBindingFilters is a slice of ACLBindingFilter that also implements -the sort interface -

-
type ACLBindingFilters []ACLBindingFilter
-

- type - - ACLBindings - - -

-

- ACLBindings is a slice of ACLBinding that also implements -the sort interface -

-
type ACLBindings []ACLBinding
-

- func (ACLBindings) - - Len - - -

-
func (a ACLBindings) Len() int
-

- func (ACLBindings) - - Less - - -

-
func (a ACLBindings) Less(i, j int) bool
-

- func (ACLBindings) - - Swap - - -

-
func (a ACLBindings) Swap(i, j int)
-

- type - - ACLOperation - - -

-

- ACLOperation enumerates the different types of ACL operation. -

-
type ACLOperation int
-

- func - - ACLOperationFromString - - -

-
func ACLOperationFromString(aclOperationString string) (ACLOperation, error)
-

- ACLOperationFromString translates an ACL operation name to an ACLOperation value. -

-

- func (ACLOperation) - - String - - -

-
func (o ACLOperation) String() string
-

- String returns the human-readable representation of an ACLOperation -

-

- type - - ACLPermissionType - - -

-

- ACLPermissionType enumerates the different types of ACL permission types. -

-
type ACLPermissionType int
-

- func - - ACLPermissionTypeFromString - - -

-
func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error)
-

- ACLPermissionTypeFromString translates an ACL permission type name to an ACLPermissionType value. -

-

- func (ACLPermissionType) - - String - - -

-
func (o ACLPermissionType) String() string
-

- String returns the human-readable representation of an ACLPermissionType -

-

- type - - AdminClient - - -

-

- AdminClient is derived from an existing Producer or Consumer -

-
type AdminClient struct {
-    // contains filtered or unexported fields
-}
-
-

- func - - NewAdminClient - - -

-
func NewAdminClient(conf *ConfigMap) (*AdminClient, error)
-

- NewAdminClient creates a new AdminClient instance with a new underlying client instance -

-

- func - - NewAdminClientFromConsumer - - -

-
func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error)
-

- NewAdminClientFromConsumer derives a new AdminClient from an existing Consumer instance. -The AdminClient will use the same configuration and connections as the parent instance. -

-

- func - - NewAdminClientFromProducer - - -

-
func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error)
-

- NewAdminClientFromProducer derives a new AdminClient from an existing Producer instance. -The AdminClient will use the same configuration and connections as the parent instance. -

-

- func (*AdminClient) - - AlterConfigs - - -

-
func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)
-

- AlterConfigs alters/updates cluster resource configuration. -

-

- Updates are not transactional so they may succeed for a subset -of the provided resources while others fail. -The configuration for a particular resource is updated atomically, -replacing values using the provided ConfigEntrys and reverting -unspecified ConfigEntrys to their default values. -

-

- Requires broker version >=0.11.0.0 -

-

- AlterConfigs will replace all existing configuration for -the provided resources with the new configuration given, -reverting all other configuration to their default values. -

-

- Multiple resources and resource types may be set, but at most one -resource of type ResourceBroker is allowed per call since these -resource requests must be sent to the broker specified in the resource. -

-
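Because unspecified entries are reverted to defaults, AlterConfigs is typically used after a DescribeConfigs round-trip. A hedged sketch of setting one topic property (hypothetical topic and retention value; StringMapToConfigEntries and AlterOperationSet are documented further below):

    func setTopicRetention(a *kafka.AdminClient, topic string) error {
        res := kafka.ConfigResource{
            Type: kafka.ResourceTopic,
            Name: topic,
            Config: kafka.StringMapToConfigEntries(
                map[string]string{"retention.ms": "86400000"}, // hypothetical value
                kafka.AlterOperationSet),
        }
        results, err := a.AlterConfigs(context.Background(), []kafka.ConfigResource{res})
        if err != nil {
            return err // client-level error
        }
        for _, r := range results {
            if r.Error.Code() != kafka.ErrNoError {
                return r.Error // per-resource error
            }
        }
        return nil
    }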

- func (*AdminClient) - - Close - - -

-
func (a *AdminClient) Close()
-

- Close an AdminClient instance. -

-

- func (*AdminClient) - - ClusterID - - -

-
func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error)
-

- ClusterID returns the cluster ID as reported in broker metadata. -

-

- Note on cancellation: Although the underlying C function respects the -timeout, it currently cannot be manually cancelled. That means manually -cancelling the context will block until the C function call returns. -

-

- Requires broker version >= 0.10.0. -

-

- func (*AdminClient) - - ControllerID - - -

-
func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error)
-

- ControllerID returns the broker ID of the current controller as reported in -broker metadata. -

-

- Note on cancellation: Although the underlying C function respects the -timeout, it currently cannot be manually cancelled. That means manually -cancelling the context will block until the C function call returns. -

-

- Requires broker version >= 0.10.0. -

-

- func (*AdminClient) - - CreateACLs - - -

-
func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error)
-

- CreateACLs creates one or more ACL bindings. -

-

- Parameters: -

-
* `ctx` - context with the maximum amount of time to block, or nil for indefinite.
-* `aclBindings` - A slice of ACL binding specifications to create.
-* `options` - Create ACLs options
-
-

- Returns a slice of CreateACLResult with an ErrNoError ErrorCode when the operation was successful, plus an error that is not nil for client-level errors -

-
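A minimal sketch of granting read access on one topic, built from the ACLBinding fields shown earlier (the principal and topic names are hypothetical):

    func allowRead(a *kafka.AdminClient, topic, principal string) error {
        binding := kafka.ACLBinding{
            Type:                kafka.ResourceTopic,
            Name:                topic,
            ResourcePatternType: kafka.ResourcePatternTypeLiteral,
            Principal:           principal, // e.g. "User:prometheus" (hypothetical)
            Host:                "*",
            Operation:           kafka.ACLOperationRead,
            PermissionType:      kafka.ACLPermissionTypeAllow,
        }
        results, err := a.CreateACLs(context.Background(), kafka.ACLBindings{binding})
        if err != nil {
            return err // client-level error
        }
        for _, r := range results {
            if r.Error.Code() != kafka.ErrNoError {
                return r.Error // per-binding error
            }
        }
        return nil
    }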

- func (*AdminClient) - - CreatePartitions - - -

-
func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error)
-

- CreatePartitions creates additional partitions for topics. -

-

- func (*AdminClient) - - CreateTopics - - -

-
func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error)
-

- CreateTopics creates topics in cluster. -

-

- The list of TopicSpecification objects define the per-topic partition count, replicas, etc. -

-

- Topic creation is non-atomic and may succeed for some topics but fail for others, -make sure to check the result for topic-specific errors. -

-

- Note: TopicSpecification is analogous to NewTopic in the Java Topic Admin API. -

-
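As a hedged illustration (hypothetical topic name and sizing; SetAdminOperationTimeout is documented below), creating a single topic and checking the per-topic results might look like:

    func createTopic(a *kafka.AdminClient, name string) error {
        spec := kafka.TopicSpecification{
            Topic:             name,
            NumPartitions:     3, // hypothetical sizing
            ReplicationFactor: 1,
        }
        results, err := a.CreateTopics(
            context.Background(),
            []kafka.TopicSpecification{spec},
            kafka.SetAdminOperationTimeout(30*time.Second))
        if err != nil {
            return err // client-level error
        }
        // Topic creation is non-atomic, so check each TopicResult.
        for _, r := range results {
            if r.Error.Code() != kafka.ErrNoError {
                return r.Error
            }
        }
        return nil
    }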

- func (*AdminClient) - - DeleteACLs - - -

-
func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error)
-

- DeleteACLs deletes ACL bindings matching one or more ACL binding filters. -

-

- Parameters: -

-
* `ctx` - context with the maximum amount of time to block, or nil for indefinite.
-* `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete.
-   string attributes match exact values or any string if set to empty string.
-   Enum attributes match exact values or any value if ending with `Any`.
-   If `ResourcePatternType` is set to `ResourcePatternTypeMatch` deletes ACL bindings with:
-   - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name
-   - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name
-   - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name
-* `options` - Delete ACLs options
-
-

- Returns a slice of ACLBinding for each filter when the operation was successful -plus an error that is not `nil` for client level errors -

-

- func (*AdminClient) - - DeleteTopics - - -

-
func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error)
-

- DeleteTopics deletes a batch of topics. -

-

- This operation is not transactional and may succeed for a subset of topics while -failing others. -It may take several seconds after the DeleteTopics result returns success for -all the brokers to become aware that the topics are gone. During this time, -topic metadata and configuration may continue to return information about deleted topics. -

-

- Requires broker version >= 0.10.1.0 -

-

- func (*AdminClient) - - DescribeACLs - - -

-
func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error)
-

- DescribeACLs matches ACL bindings by filter. -

-

- Parameters: -

-
* `ctx` - context with the maximum amount of time to block, or nil for indefinite.
-* `aclBindingFilter` - A filter with attributes that must match.
-   string attributes match exact values or any string if set to empty string.
-   Enum attributes match exact values or any value if ending with `Any`.
-   If `ResourcePatternType` is set to `ResourcePatternTypeMatch` returns ACL bindings with:
-   - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name
-   - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name
-   - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name
-* `options` - Describe ACLs options
-
-

- Returns a slice of ACLBindings when the operation was successful -plus an error that is not `nil` for client level errors -

-

- func (*AdminClient) - - DescribeConfigs - - -

-
func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error)
-

- DescribeConfigs retrieves configuration for cluster resources. -

-

- The returned configuration includes default values, use -ConfigEntryResult.IsDefault or ConfigEntryResult.Source to distinguish -default values from manually configured settings. -

-

- The value of config entries where .IsSensitive is true -will always be nil to avoid disclosing sensitive -information, such as security settings. -

-

- Configuration entries where .IsReadOnly is true can't be modified -(with AlterConfigs). -

-

- Synonym configuration entries are returned if the broker supports -it (broker version >= 1.1.0). See .Synonyms. -

-

- Requires broker version >=0.11.0.0 -

-

- Multiple resources and resource types may be requested, but at most -one resource of type ResourceBroker is allowed per call -since these resource requests must be sent to the broker specified -in the resource. -

-

- func (*AdminClient) - - GetMetadata - - -

-
func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
-

- GetMetadata queries broker for cluster and topic metadata. -If topic is non-nil only information about that topic is returned, else if -allTopics is false only information about locally used topics is returned, -else information about all topics is returned. -GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. -

-

- func (*AdminClient) - - SetOAuthBearerToken - - -

-
func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
-

- SetOAuthBearerToken sets the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil on success, otherwise an error if: 1) the token data is invalid (meaning an expiration time in the past, or a token value, extension key or extension value that does not meet the regular expression requirements of https://tools.ietf.org/html/rfc7628#section-3.1); 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 3) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism. -

-

- func (*AdminClient) - - SetOAuthBearerTokenFailure - - -

-
func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error
-

- SetOAuthBearerTokenFailure sets the error message describing why token -retrieval/setting failed; it also schedules a new token refresh event for 10 -seconds later so the attempt may be retried. It will return nil on -success, otherwise an error if: -1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; -2) SASL/OAUTHBEARER is supported but is not configured as the client's -authentication mechanism. -

-

- func (*AdminClient) - - String - - -

-
func (a *AdminClient) String() string
-

- String returns a human readable name for an AdminClient instance -

-

- type - - AdminOption - - -

-

- AdminOption is a generic type not to be used directly. -

-

- See CreateTopicsAdminOption et al. -

-
type AdminOption interface {
-    // contains filtered or unexported methods
-}
-

- type - - AdminOptionOperationTimeout - - -

-

- AdminOptionOperationTimeout sets the broker's operation timeout, such as the -timeout for CreateTopics to complete the creation of topics on the controller -before returning a result to the application. -

-

- CreateTopics, DeleteTopics, CreatePartitions: -a value 0 will return immediately after triggering topic -creation, while > 0 will wait this long for topic creation to propagate -in cluster. -

-

- Default: 0 (return immediately). -

-

- Valid for CreateTopics, DeleteTopics, CreatePartitions. -

-
type AdminOptionOperationTimeout struct {
-    // contains filtered or unexported fields
-}
-
-

- func - - SetAdminOperationTimeout - - -

-
func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout)
-

- SetAdminOperationTimeout sets the broker's operation timeout, such as the -timeout for CreateTopics to complete the creation of topics on the controller -before returning a result to the application. -

-

- CreateTopics, DeleteTopics, CreatePartitions: -a value 0 will return immediately after triggering topic -creation, while > 0 will wait this long for topic creation to propagate -in cluster. -

-

- Default: 0 (return immediately). -

-

- Valid for CreateTopics, DeleteTopics, CreatePartitions. -

-

- type - - AdminOptionRequestTimeout - - -

-

- AdminOptionRequestTimeout sets the overall request timeout, including broker -lookup, request transmission, operation time on broker, and response. -

-

- Default: `socket.timeout.ms`. -

-

- Valid for all Admin API methods. -

-
type AdminOptionRequestTimeout struct {
-    // contains filtered or unexported fields
-}
-
-

- func - - SetAdminRequestTimeout - - -

-
func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout)
-

- SetAdminRequestTimeout sets the overall request timeout, including broker -lookup, request transmission, operation time on broker, and response. -

-

- Default: `socket.timeout.ms`. -

-

- Valid for all Admin API methods. -

-

- type - - AdminOptionValidateOnly - - -

-

- AdminOptionValidateOnly tells the broker to only validate the request, -without performing the requested operation (create topics, etc). -

-

- Default: false. -

-

- Valid for CreateTopics, CreatePartitions, AlterConfigs -

-
type AdminOptionValidateOnly struct {
-    // contains filtered or unexported fields
-}
-
-

- func - - SetAdminValidateOnly - - -

-
func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly)
-

- SetAdminValidateOnly tells the broker to only validate the request, -without performing the requested operation (create topics, etc). -

-

- Default: false. -

-

- Valid for CreateTopics, DeleteTopics, CreatePartitions, AlterConfigs -

-

- type - - AlterConfigsAdminOption - - -

-

- AlterConfigsAdminOption - see setters. -

-

- See SetAdminRequestTimeout, SetAdminValidateOnly, SetAdminIncremental. -

-
type AlterConfigsAdminOption interface {
-    // contains filtered or unexported methods
-}
-

- type - - AlterOperation - - -

-

- AlterOperation specifies the operation to perform on the ConfigEntry. -Currently only AlterOperationSet. -

-
type AlterOperation int
-

- func (AlterOperation) - - String - - -

-
func (o AlterOperation) String() string
-

- String returns the human-readable representation of an AlterOperation -

-

- type - - AssignedPartitions - - -

-

- AssignedPartitions consumer group rebalance event: assigned partition set -

-
type AssignedPartitions struct {
-    Partitions []TopicPartition
-}
-
-

- func (AssignedPartitions) - - String - - -

-
func (e AssignedPartitions) String() string
-

- type - - BrokerMetadata - - -

-

- BrokerMetadata contains per-broker metadata -

-
type BrokerMetadata struct {
-    ID   int32
-    Host string
-    Port int
-}
-
-

- type - - ConfigEntry - - -

-

- ConfigEntry holds parameters for altering a resource's configuration. -

-
type ConfigEntry struct {
-    // Name of configuration entry, e.g., topic configuration property name.
-    Name string
-    // Value of configuration entry.
-    Value string
-    // Operation to perform on the entry.
-    Operation AlterOperation
-}
-
-

- func - - StringMapToConfigEntries - - -

-
func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry
-

- StringMapToConfigEntries creates a new map of ConfigEntry objects from the -provided string map. The AlterOperation is set on each created entry. -

-

- func (ConfigEntry) - - String - - -

-
func (c ConfigEntry) String() string
-

- String returns a human-readable representation of a ConfigEntry. -

-

- type - - ConfigEntryResult - - -

-

- ConfigEntryResult contains the result of a single configuration entry from a -DescribeConfigs request. -

-
type ConfigEntryResult struct {
-    // Name of configuration entry, e.g., topic configuration property name.
-    Name string
-    // Value of configuration entry.
-    Value string
-    // Source indicates the configuration source.
-    Source ConfigSource
-    // IsReadOnly indicates whether the configuration entry can be altered.
-    IsReadOnly bool
-    // IsSensitive indicates whether the configuration entry contains sensitive information, in which case the value will be unset.
-    IsSensitive bool
-    // IsSynonym indicates whether the configuration entry is a synonym for another configuration property.
-    IsSynonym bool
-    // Synonyms contains a map of configuration entries that are synonyms to this configuration entry.
-    Synonyms map[string]ConfigEntryResult
-}
-
-

- func (ConfigEntryResult) - - String - - -

-
func (c ConfigEntryResult) String() string
-

- String returns a human-readable representation of a ConfigEntryResult. -

-

- type - - ConfigMap - - -

-

- ConfigMap is a map containing standard librdkafka configuration properties as documented in https://github.com/edenhill/librdkafka/tree/master/CONFIGURATION.md -

-

- The special property "default.topic.config" (optional) is a ConfigMap -containing default topic configuration properties. -

-

- The use of "default.topic.config" is deprecated, -topic configuration properties shall be specified in the standard ConfigMap. -For backwards compatibility, "default.topic.config" (if supplied) -takes precedence. -

-
type ConfigMap map[string]ConfigValue
-

- func (ConfigMap) - - Get - - -

-
func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error)
-

- Get finds the given key in the ConfigMap and returns its value. -If the key is not found `defval` is returned. -If the key is found but the type does not match that of `defval` (unless nil) -an ErrInvalidArg error is returned. -

-

- func (ConfigMap) - - Set - - -

-
func (m ConfigMap) Set(kv string) error
-

- Set implements flag.Set (command line argument parser) as a convenience -for `-X key=value` config. -

-

- func (ConfigMap) - - SetKey - - -

-
func (m ConfigMap) SetKey(key string, value ConfigValue) error
-

- SetKey sets configuration property key to value. -

-

- For user convenience a key prefixed with {topic}. will be -set on the "default.topic.config" sub-map, this use is deprecated. -

-
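A small sketch tying Get, Set and SetKey together (the property names are standard librdkafka properties; the values are hypothetical):

    func buildConfig() (*kafka.ConfigMap, error) {
        cm := &kafka.ConfigMap{}
        // SetKey stores a single property directly.
        if err := cm.SetKey("bootstrap.servers", "kafka:9092"); err != nil {
            return nil, err
        }
        // Set parses "key=value", convenient for -X style CLI flags.
        if err := cm.Set("compression.type=snappy"); err != nil {
            return nil, err
        }
        // Get returns the supplied default when the key is absent.
        v, err := cm.Get("linger.ms", "5")
        if err != nil {
            return nil, err
        }
        fmt.Println("linger.ms:", v)
        return cm, nil
    }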

- type - - ConfigResource - - -

-

- ConfigResource holds parameters for altering an Apache Kafka configuration resource -

-
type ConfigResource struct {
-    // Type of resource to set.
-    Type ResourceType
-    // Name of resource to set.
-    Name string
-    // Config entries to set.
-    // Configuration updates are atomic, any configuration property not provided
-    // here will be reverted (by the broker) to its default value.
-    // Use DescribeConfigs to retrieve the list of current configuration entry values.
-    Config []ConfigEntry
-}
-
-

- func (ConfigResource) - - String - - -

-
func (c ConfigResource) String() string
-

- String returns a human-readable representation of a ConfigResource -

-

- type - - ConfigResourceResult - - -

-

- ConfigResourceResult provides the result for a resource from a AlterConfigs or -DescribeConfigs request. -

-
type ConfigResourceResult struct {
-    // Type of returned result resource.
-    Type ResourceType
-    // Name of returned result resource.
-    Name string
-    // Error, if any, of returned result resource.
-    Error Error
-    // Config entries, if any, of returned result resource.
-    Config map[string]ConfigEntryResult
-}
-
-

- func (ConfigResourceResult) - - String - - -

-
func (c ConfigResourceResult) String() string
-

- String returns a human-readable representation of a ConfigResourceResult. -

-

- type - - ConfigSource - - -

-

- ConfigSource represents an Apache Kafka config source -

-
type ConfigSource int
-

- func (ConfigSource) - - String - - -

-
func (t ConfigSource) String() string
-

- String returns the human-readable representation of a ConfigSource type -

-

- type - - ConfigValue - - -

-

- ConfigValue supports the following types: -

-
bool, int, string, any type with the standard String() interface
-
-
type ConfigValue interface{}
-

- type - - Consumer - - -

-

- Consumer implements a High-level Apache Kafka Consumer instance -

-
type Consumer struct {
-    // contains filtered or unexported fields
-}
-
-

- func - - NewConsumer - - -

-
func NewConsumer(conf *ConfigMap) (*Consumer, error)
-

- NewConsumer creates a new high-level Consumer instance. -

-

- conf is a *ConfigMap with standard librdkafka configuration properties. -

-

- Supported special configuration properties: -

-
go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel.
-                                     If set to true the app must handle the AssignedPartitions and
-                                     RevokedPartitions events and call Assign() and Unassign()
-                                     respectively.
-go.events.channel.enable (bool, false) - [deprecated] Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled.
-go.events.channel.size (int, 1000) - Events() channel size
-go.logs.channel.enable (bool, false) - Forward log to Logs() channel.
-go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.
-
-

- WARNING: Due to the buffering nature of channels (and queues in general) the -use of the events channel risks receiving outdated events and -messages. Minimizing go.events.channel.size reduces the risk -and number of outdated events and messages but does not eliminate -the factor completely. With a channel size of 1 at most one -event or message may be outdated. -

-
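Given the deprecation of the channel-based mode above, a plain Poll()/ReadMessage()-driven consumer is the usual choice. A minimal sketch, with hypothetical broker, group and topic names:

    func newConsumer() (*kafka.Consumer, error) {
        c, err := kafka.NewConsumer(&kafka.ConfigMap{
            "bootstrap.servers": "kafka:9092",    // hypothetical broker
            "group.id":          "example-group", // hypothetical group id
            "auto.offset.reset": "earliest",
        })
        if err != nil {
            return nil, err
        }
        // nil rebalance callback: default assign/unassign handling applies.
        if err := c.SubscribeTopics([]string{"metrics"}, nil); err != nil {
            c.Close()
            return nil, err
        }
        return c, nil
    }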

- func (*Consumer) - - Assign - - -

-
func (c *Consumer) Assign(partitions []TopicPartition) (err error)
-

- Assign an atomic set of partitions to consume. -

-

- The .Offset field of each TopicPartition must either be set to an absolute -starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc), -but should typically be set to `kafka.OffsetStored` to have the consumer -use the committed offset as a start position, with a fallback to -`auto.offset.reset` if there is no committed offset. -

-

- This replaces the current assignment. -

-

- func (*Consumer) - - Assignment - - -

-
func (c *Consumer) Assignment() (partitions []TopicPartition, err error)
-

- Assignment returns the current partition assignments -

-

- func (*Consumer) - - AssignmentLost - - -

-
func (c *Consumer) AssignmentLost() bool
-

- AssignmentLost returns true if the current partition assignment has been lost. This method is only applicable for use with a subscribing consumer when handling a rebalance event or callback. Partitions that have been lost may already be owned by other members in the group and therefore committing offsets, for example, may fail. -

-

- func (*Consumer) - - Close - - -

-
func (c *Consumer) Close() (err error)
-

- Close Consumer instance. -The object is no longer usable after this call. -

-

- func (*Consumer) - - Commit - - -

-
func (c *Consumer) Commit() ([]TopicPartition, error)
-

- Commit offsets for currently assigned partitions -This is a blocking call. -Returns the committed offsets on success. -

-

- func (*Consumer) - - CommitMessage - - -

-
func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error)
-

- CommitMessage commits offset based on the provided message. -This is a blocking call. -Returns the committed offsets on success. -

-

- func (*Consumer) - - CommitOffsets - - -

-
func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error)
-

- CommitOffsets commits the provided list of offsets -This is a blocking call. -Returns the committed offsets on success. -

-

- func (*Consumer) - - Committed - - -

-
func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
-

- Committed retrieves committed offsets for the given set of partitions -

-

- func (*Consumer) - - Events - - -

-
func (c *Consumer) Events() chan Event
-

- Events returns the Events channel (if enabled) -

-

- func (*Consumer) - - GetConsumerGroupMetadata - - -

-
func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error)
-

- GetConsumerGroupMetadata returns the consumer's current group metadata. -This object should be passed to the transactional producer's -SendOffsetsToTransaction() API. -

-
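A hedged sketch of the consume-transform-produce handoff this enables, assuming InitTransactions and BeginTransaction have already been called on the producer:

    func sendOffsetsInTransaction(ctx context.Context, p *kafka.Producer, c *kafka.Consumer) error {
        parts, err := c.Assignment()
        if err != nil {
            return err
        }
        offsets, err := c.Position(parts) // next offsets to consume
        if err != nil {
            return err
        }
        meta, err := c.GetConsumerGroupMetadata()
        if err != nil {
            return err
        }
        // Commit the consumed offsets as part of the producer's transaction.
        if err := p.SendOffsetsToTransaction(ctx, offsets, meta); err != nil {
            return err
        }
        return p.CommitTransaction(ctx)
    }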

- func (*Consumer) - - GetMetadata - - -

-
func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
-

- GetMetadata queries broker for cluster and topic metadata. -If topic is non-nil only information about that topic is returned, else if -allTopics is false only information about locally used topics is returned, -else information about all topics is returned. -GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. -

-

- func (*Consumer) - - GetRebalanceProtocol - - -

-
func (c *Consumer) GetRebalanceProtocol() string
-

- GetRebalanceProtocol returns the current consumer group rebalance protocol, -which is either "EAGER" or "COOPERATIVE". -If the rebalance protocol is not known in the current state an empty string -is returned. -Should typically only be called during rebalancing. -

-

- func (*Consumer) - - GetWatermarkOffsets - - -

-
func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error)
-

- GetWatermarkOffsets returns the cached low and high offsets for the given topic -and partition. The high offset is populated on every fetch response or via calling QueryWatermarkOffsets. -The low offset is populated every statistics.interval.ms if that value is set. -OffsetInvalid will be returned if there is no cached offset for either value. -

-

- func (*Consumer) - - IncrementalAssign - - -

-
func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error)
-

- IncrementalAssign adds the specified partitions to the current set of -partitions to consume. -

-

- The .Offset field of each TopicPartition must either be set to an absolute -starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc), -but should typically be set to `kafka.OffsetStored` to have the consumer -use the committed offset as a start position, with a fallback to -`auto.offset.reset` if there is no committed offset. -

-

- The new partitions must not be part of the current assignment. -

-

- func (*Consumer) - - IncrementalUnassign - - -

-
func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error)
-

- IncrementalUnassign removes the specified partitions from the current set of -partitions to consume. -

-

- The .Offset field of the TopicPartition is ignored. -

-

- The removed partitions must be part of the current assignment. -

-

- func (*Consumer) - - Logs - - -

-
func (c *Consumer) Logs() chan LogEvent
-

- Logs returns the log channel if enabled, or nil otherwise. -

-

- func (*Consumer) - - OffsetsForTimes - - -

-
func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
-

- OffsetsForTimes looks up offsets by timestamp for the given partitions. -

-

- The returned offset for each partition is the earliest offset whose -timestamp is greater than or equal to the given timestamp in the -corresponding partition. If the provided timestamp exceeds that of the -last message in the partition, a value of -1 will be returned. -

-

- The timestamps to query are represented as `.Offset` in the `times` -argument and the looked up offsets are represented as `.Offset` in the returned -`offsets` list. -

-

- The function will block for at most timeoutMs milliseconds. -

-

- Duplicate Topic+Partitions are not supported. -Per-partition errors may be returned in the `.Error` field. -

-
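For illustration, a single-partition lookup where the query timestamp rides in the .Offset field, as described above (the 5-second timeout is arbitrary):

    func offsetsSince(c *kafka.Consumer, topic string, tsMillis int64) ([]kafka.TopicPartition, error) {
        times := []kafka.TopicPartition{{
            Topic:     &topic,
            Partition: 0,
            Offset:    kafka.Offset(tsMillis), // timestamp in, offset out
        }}
        return c.OffsetsForTimes(times, 5000)
    }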

- func (*Consumer) - - Pause - - -

-
func (c *Consumer) Pause(partitions []TopicPartition) (err error)
-

- Pause consumption for the provided list of partitions -

-

- Note that messages already enqueued on the consumer's Event channel -(if `go.events.channel.enable` has been set) will NOT be purged by -this call, set `go.events.channel.size` accordingly. -

-

- func (*Consumer) - - Poll - - -

-
func (c *Consumer) Poll(timeoutMs int) (event Event)
-

- Poll the consumer for messages or events. -

-

- Will block for at most timeoutMs milliseconds -

-

- The following callbacks may be triggered: -

-
Subscribe()'s rebalanceCb
-
-

- Returns nil on timeout, else an Event -

-

- func (*Consumer) - - Position - - -

-
func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error)
-

- Position returns the current consume position for the given partitions. -Typical use is to call Assignment() to get the partition list -and then pass it to Position() to get the current consume position for -each of the assigned partitions. -The consume position is the next message to read from the partition. -i.e., the offset of the last message seen by the application + 1. -

-

- func (*Consumer) - - QueryWatermarkOffsets - - -

-
func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
-

- QueryWatermarkOffsets queries the broker for the low and high offsets for the given topic and partition. -

-
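A small sketch using the watermarks to report how many messages a partition currently retains (timeout arbitrary):

    func retainedCount(c *kafka.Consumer, topic string, partition int32) (int64, error) {
        low, high, err := c.QueryWatermarkOffsets(topic, partition, 5000)
        if err != nil {
            return 0, err
        }
        return high - low, nil
    }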

- func (*Consumer) - - ReadMessage - - -

-
func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error)
-

- ReadMessage polls the consumer for a message. -

-

- This is a convenience API that wraps Poll() and only returns -messages or errors. All other event types are discarded. -

-

- The call will block for at most `timeout` waiting for -a new message or error. `timeout` may be set to -1 for -indefinite wait. -

-

- Timeout is returned as (nil, err) where err is `err.(kafka.Error).Code() == kafka.ErrTimedOut`. -

-

- Messages are returned as (msg, nil), -while general errors are returned as (nil, err), -and partition-specific errors are returned as (msg, err) where -msg.TopicPartition provides partition-specific information (such as topic, partition and offset). -

-

- All other event types, such as PartitionEOF, AssignedPartitions, etc, are silently discarded. -

-
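Putting the above together, a hedged sketch of a bounded consume loop (assumes the consumer is already subscribed; committing after every message is for clarity, not throughput):

    func consumeN(c *kafka.Consumer, n int) error {
        for received := 0; received < n; {
            msg, err := c.ReadMessage(100 * time.Millisecond)
            if err != nil {
                if kerr, ok := err.(kafka.Error); ok && kerr.Code() == kafka.ErrTimedOut {
                    continue // timeout: poll again
                }
                return err
            }
            fmt.Printf("%s: %s\n", msg.TopicPartition, msg.Value)
            if _, err := c.CommitMessage(msg); err != nil {
                return err
            }
            received++
        }
        return nil
    }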

- func (*Consumer) - - Resume - - -

-
func (c *Consumer) Resume(partitions []TopicPartition) (err error)
-

- Resume consumption for the provided list of partitions -

-

- func (*Consumer) - - Seek - - -

-
func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error
-

- Seek seeks the given topic partitions using the offset from the TopicPartition. -

-

- If timeoutMs is not 0 the call will wait this long for the -seek to be performed. If the timeout is reached the internal state -will be unknown and this function returns ErrTimedOut. -If timeoutMs is 0 it will initiate the seek but return -immediately without any error reporting (e.g., async). -

-

- Seek() may only be used for partitions already being consumed -(through Assign() or implicitly through a self-rebalanced Subscribe()). -To set the starting offset it is preferred to use Assign() and provide -a starting offset for each partition. -

-

- Returns an error on failure or nil otherwise. -

-
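A minimal sketch of a blocking rewind on an already-assigned partition (the 5-second timeout is arbitrary):

    func rewindTo(c *kafka.Consumer, topic string, partition int32, offset int64) error {
        return c.Seek(kafka.TopicPartition{
            Topic:     &topic,
            Partition: partition,
            Offset:    kafka.Offset(offset),
        }, 5000)
    }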

- func (*Consumer) - - SetOAuthBearerToken - - -

-
func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
-

- SetOAuthBearerToken sets the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil on success, otherwise an error if: 1) the token data is invalid (meaning an expiration time in the past, or a token value, extension key or extension value that does not meet the regular expression requirements of https://tools.ietf.org/html/rfc7628#section-3.1); 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 3) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism. -

-

- func (*Consumer) - - SetOAuthBearerTokenFailure - - -

-
func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error
-

- SetOAuthBearerTokenFailure sets the error message describing why token -retrieval/setting failed; it also schedules a new token refresh event for 10 -seconds later so the attempt may be retried. It will return nil on -success, otherwise an error if: -1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; -2) SASL/OAUTHBEARER is supported but is not configured as the client's -authentication mechanism. -

-

- func (*Consumer) - - StoreMessage - - -

-
func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error)
-

- StoreMessage stores offset based on the provided message. -This is a convenience method that uses StoreOffsets to do the actual work. -

-

- func (*Consumer) - - StoreOffsets - - -

-
func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error)
-

- StoreOffsets stores the provided list of offsets that will be committed -to the offset store according to `auto.commit.interval.ms` or manual -offset-less Commit(). -

-

- Returns the stored offsets on success. If at least one offset couldn't be stored, -an error and a list of offsets is returned. Each offset can be checked for -specific errors via its `.Error` member. -

-
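A short sketch of the store-after-processing pattern, assuming the consumer was configured with the standard librdkafka property "enable.auto.offset.store" set to false so that only explicitly stored offsets are auto-committed:

    func processAndStore(c *kafka.Consumer, msg *kafka.Message) error {
        // ... process msg first, then mark it as done:
        _, err := c.StoreMessage(msg)
        return err
    }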

- func (*Consumer) - - String - - -

-
func (c *Consumer) String() string
-

- String returns a human readable name for a Consumer instance -

-

- func (*Consumer) - - Subscribe - - -

-
func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
-

- Subscribe to a single topic -This replaces the current subscription -

-

- func (*Consumer) - - SubscribeTopics - - -

-
func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error)
-

- SubscribeTopics subscribes to the provided list of topics. -This replaces the current subscription. -

-

- func (*Consumer) - - Subscription - - -

-
func (c *Consumer) Subscription() (topics []string, err error)
-

- Subscription returns the current subscription as set by Subscribe() -

-

- func (*Consumer) - - Unassign - - -

-
func (c *Consumer) Unassign() (err error)
-

- Unassign the current set of partitions to consume. -

-

- func (*Consumer) - - Unsubscribe - - -

-
func (c *Consumer) Unsubscribe() (err error)
-

- Unsubscribe from the current subscription, if any. -

-

- type - - ConsumerGroupMetadata - - -

-

- ConsumerGroupMetadata reflects the current consumer group member metadata. -

-
type ConsumerGroupMetadata struct {
-    // contains filtered or unexported fields
-}
-
-

- func - - NewTestConsumerGroupMetadata - - -

-
func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error)
-

- NewTestConsumerGroupMetadata creates a new consumer group metadata instance -mainly for testing use. -Use GetConsumerGroupMetadata() to retrieve the real metadata. -

-

- type - - CreateACLResult - - -

-

- CreateACLResult provides create ACL error information. -

-
type CreateACLResult struct {
-    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
-    Error Error
-}
-
-

- type - - CreateACLsAdminOption - - -

-

- CreateACLsAdminOption - see setter. -

-

- See SetAdminRequestTimeout -

-
type CreateACLsAdminOption interface {
-    // contains filtered or unexported methods
-}
-

- type - - CreatePartitionsAdminOption - - -

-

- CreatePartitionsAdminOption - see setters. -

-

- See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly. -

-
type CreatePartitionsAdminOption interface {
-    // contains filtered or unexported methods
-}
-

- type - - CreateTopicsAdminOption - - -

-

- CreateTopicsAdminOption - see setters. -

-

- See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly. -

-
type CreateTopicsAdminOption interface {
-    // contains filtered or unexported methods
-}
-

- type - - DeleteACLsAdminOption - - -

-

- DeleteACLsAdminOption - see setter. -

-

- See SetAdminRequestTimeout -

-
type DeleteACLsAdminOption interface {
-    // contains filtered or unexported methods
-}
-

- type - - DeleteACLsResult - - -

-

- DeleteACLsResult provides delete ACLs result or error information. -

-
type DeleteACLsResult = DescribeACLsResult
-

- type - - DeleteTopicsAdminOption - - -

-

- DeleteTopicsAdminOption - see setters. -

-

- See SetAdminRequestTimeout, SetAdminOperationTimeout. -

-
type DeleteTopicsAdminOption interface {
-    // contains filtered or unexported methods
-}
-

- type - - DescribeACLsAdminOption - - -

-

- DescribeACLsAdminOption - see setter. -

-

- See SetAdminRequestTimeout -

-
type DescribeACLsAdminOption interface {
-    // contains filtered or unexported methods
-}
-

- type - - DescribeACLsResult - - -

-

- DescribeACLsResult provides describe ACLs result or error information. -

-
type DescribeACLsResult struct {
-    // Slice of ACL bindings matching the provided filter
-    ACLBindings ACLBindings
-    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
-    Error Error
-}
-
-

- type - - DescribeConfigsAdminOption - - -

-

- DescribeConfigsAdminOption - see setters. -

-

- See SetAdminRequestTimeout. -

-
type DescribeConfigsAdminOption interface {
-    // contains filtered or unexported methods
-}
-

- type - - Error - - -

-

- Error provides a Kafka-specific error container -

-
type Error struct {
-    // contains filtered or unexported fields
-}
-
-

- func - - NewError - - -

-
func NewError(code ErrorCode, str string, fatal bool) (err Error)
-

- NewError creates a new Error. -

-

- func (Error) - - Code - - -

-
func (e Error) Code() ErrorCode
-

- Code returns the ErrorCode of an Error -

-

- func (Error) - - Error - - -

-
func (e Error) Error() string
-

- Error returns a human readable representation of an Error -Same as Error.String() -

-

- func (Error) - - IsFatal - - -

-
func (e Error) IsFatal() bool
-

- IsFatal returns true if the error is a fatal error. -A fatal error indicates the client instance is no longer operable and -should be terminated. Typical causes include non-recoverable -idempotent producer errors. -

-

- func (Error) - - IsRetriable - - -

-
func (e Error) IsRetriable() bool
-

- IsRetriable returns true if the operation that caused this error -may be retried. -This flag is currently only set by the Transactional producer API. -

-

- func (Error) - - String - - -

-
func (e Error) String() string
-

- String returns a human readable representation of an Error -

-

- func (Error) - - TxnRequiresAbort - - -

-
func (e Error) TxnRequiresAbort() bool
-

- TxnRequiresAbort returns true if the error is an abortable transaction error -that requires the application to abort the current transaction with -AbortTransaction() and start a new transaction with BeginTransaction() -if it wishes to proceed with transactional operations. -This flag is only set by the Transactional producer API. -

-
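A hedged sketch of dispatching on these flags for a transactional producer (assumes the errors and context packages; the recovery actions are illustrative, not prescriptive):

    func handleTxnError(p *kafka.Producer, err error) {
        var ke kafka.Error
        if !errors.As(err, &ke) {
            return
        }
        switch {
        case ke.IsFatal():
            // Client instance is no longer operable; tear it down.
            p.Close()
        case ke.TxnRequiresAbort():
            _ = p.AbortTransaction(context.Background())
        case ke.IsRetriable():
            // Safe to retry the failed transactional operation.
        }
    }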

- type - - ErrorCode - - -

-

- ErrorCode is the integer representation of local and broker error codes -

-
type ErrorCode int
-
const (
-    // ErrBadMsg Local: Bad message format
-    ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
-    // ErrBadCompression Local: Invalid compressed data
-    ErrBadCompression ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION)
-    // ErrDestroy Local: Broker handle destroyed
-    ErrDestroy ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__DESTROY)
-    // ErrFail Local: Communication failure with broker
-    ErrFail ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FAIL)
-    // ErrTransport Local: Broker transport failure
-    ErrTransport ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TRANSPORT)
-    // ErrCritSysResource Local: Critical system resource failure
-    ErrCritSysResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE)
-    // ErrResolve Local: Host resolution failure
-    ErrResolve ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RESOLVE)
-    // ErrMsgTimedOut Local: Message timed out
-    ErrMsgTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
-    // ErrPartitionEOF Broker: No more messages
-    ErrPartitionEOF ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTITION_EOF)
-    // ErrUnknownPartition Local: Unknown partition
-    ErrUnknownPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
-    // ErrFs Local: File or filesystem error
-    ErrFs ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FS)
-    // ErrUnknownTopic Local: Unknown topic
-    ErrUnknownTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
-    // ErrAllBrokersDown Local: All broker connections are down
-    ErrAllBrokersDown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
-    // ErrInvalidArg Local: Invalid argument or configuration
-    ErrInvalidArg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_ARG)
-    // ErrTimedOut Local: Timed out
-    ErrTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT)
-    // ErrQueueFull Local: Queue full
-    ErrQueueFull ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__QUEUE_FULL)
-    // ErrIsrInsuff Local: ISR count insufficient
-    ErrIsrInsuff ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ISR_INSUFF)
-    // ErrNodeUpdate Local: Broker node update
-    ErrNodeUpdate ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NODE_UPDATE)
-    // ErrSsl Local: SSL error
-    ErrSsl ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__SSL)
-    // ErrWaitCoord Local: Waiting for coordinator
-    ErrWaitCoord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_COORD)
-    // ErrUnknownGroup Local: Unknown group
-    ErrUnknownGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP)
-    // ErrInProgress Local: Operation in progress
-    ErrInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__IN_PROGRESS)
-    // ErrPrevInProgress Local: Previous operation in progress
-    ErrPrevInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS)
-    // ErrExistingSubscription Local: Existing subscription
-    ErrExistingSubscription ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION)
-    // ErrAssignPartitions Local: Assign partitions
-    ErrAssignPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
-    // ErrRevokePartitions Local: Revoke partitions
-    ErrRevokePartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
-    // ErrConflict Local: Conflicting use
-    ErrConflict ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__CONFLICT)
-    // ErrState Local: Erroneous state
-    ErrState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__STATE)
-    // ErrUnknownProtocol Local: Unknown protocol
-    ErrUnknownProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL)
-    // ErrNotImplemented Local: Not implemented
-    ErrNotImplemented ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED)
-    // ErrAuthentication Local: Authentication failure
-    ErrAuthentication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTHENTICATION)
-    // ErrNoOffset Local: No offset stored
-    ErrNoOffset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NO_OFFSET)
-    // ErrOutdated Local: Outdated
-    ErrOutdated ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__OUTDATED)
-    // ErrTimedOutQueue Local: Timed out in queue
-    ErrTimedOutQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE)
-    // ErrUnsupportedFeature Local: Required feature not supported by broker
-    ErrUnsupportedFeature ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE)
-    // ErrWaitCache Local: Awaiting cache update
-    ErrWaitCache ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__WAIT_CACHE)
-    // ErrIntr Local: Operation interrupted
-    ErrIntr ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INTR)
-    // ErrKeySerialization Local: Key serialization error
-    ErrKeySerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION)
-    // ErrValueSerialization Local: Value serialization error
-    ErrValueSerialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION)
-    // ErrKeyDeserialization Local: Key deserialization error
-    ErrKeyDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION)
-    // ErrValueDeserialization Local: Value deserialization error
-    ErrValueDeserialization ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION)
-    // ErrPartial Local: Partial response
-    ErrPartial ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PARTIAL)
-    // ErrReadOnly Local: Read-only object
-    ErrReadOnly ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__READ_ONLY)
-    // ErrNoent Local: No such entry
-    ErrNoent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOENT)
-    // ErrUnderflow Local: Read underflow
-    ErrUnderflow ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNDERFLOW)
-    // ErrInvalidType Local: Invalid type
-    ErrInvalidType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INVALID_TYPE)
-    // ErrRetry Local: Retry operation
-    ErrRetry ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__RETRY)
-    // ErrPurgeQueue Local: Purged in queue
-    ErrPurgeQueue ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_QUEUE)
-    // ErrPurgeInflight Local: Purged in flight
-    ErrPurgeInflight ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT)
-    // ErrFatal Local: Fatal error
-    ErrFatal ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FATAL)
-    // ErrInconsistent Local: Inconsistent state
-    ErrInconsistent ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__INCONSISTENT)
-    // ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding
-    ErrGaplessGuarantee ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE)
-    // ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded
-    ErrMaxPollExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
-    // ErrUnknownBroker Local: Unknown broker
-    ErrUnknownBroker ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER)
-    // ErrNotConfigured Local: Functionality not configured
-    ErrNotConfigured ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED)
-    // ErrFenced Local: This instance has been fenced by a newer instance
-    ErrFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__FENCED)
-    // ErrApplication Local: Application generated error
-    ErrApplication ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__APPLICATION)
-    // ErrAssignmentLost Local: Group partition assignment lost
-    ErrAssignmentLost ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST)
-    // ErrNoop Local: No operation performed
-    ErrNoop ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__NOOP)
-    // ErrAutoOffsetReset Local: No offset to automatically reset to
-    ErrAutoOffsetReset ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET)
-    // ErrUnknown Unknown broker error
-    ErrUnknown ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN)
-    // ErrNoError Success
-    ErrNoError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_ERROR)
-    // ErrOffsetOutOfRange Broker: Offset out of range
-    ErrOffsetOutOfRange ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE)
-    // ErrInvalidMsg Broker: Invalid message
-    ErrInvalidMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG)
-    // ErrUnknownTopicOrPart Broker: Unknown topic or partition
-    ErrUnknownTopicOrPart ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
-    // ErrInvalidMsgSize Broker: Invalid message size
-    ErrInvalidMsgSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE)
-    // ErrLeaderNotAvailable Broker: Leader not available
-    ErrLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
-    // ErrNotLeaderForPartition Broker: Not leader for partition
-    ErrNotLeaderForPartition ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION)
-    // ErrRequestTimedOut Broker: Request timed out
-    ErrRequestTimedOut ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT)
-    // ErrBrokerNotAvailable Broker: Broker not available
-    ErrBrokerNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE)
-    // ErrReplicaNotAvailable Broker: Replica not available
-    ErrReplicaNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE)
-    // ErrMsgSizeTooLarge Broker: Message size too large
-    ErrMsgSizeTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
-    // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode
-    ErrStaleCtrlEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH)
-    // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large
-    ErrOffsetMetadataTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE)
-    // ErrNetworkException Broker: Broker disconnected before response received
-    ErrNetworkException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION)
-    // ErrCoordinatorLoadInProgress Broker: Coordinator load in progress
-    ErrCoordinatorLoadInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS)
-    // ErrCoordinatorNotAvailable Broker: Coordinator not available
-    ErrCoordinatorNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE)
-    // ErrNotCoordinator Broker: Not coordinator
-    ErrNotCoordinator ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR)
-    // ErrTopicException Broker: Invalid topic
-    ErrTopicException ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION)
-    // ErrRecordListTooLarge Broker: Message batch larger than configured server segment size
-    ErrRecordListTooLarge ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE)
-    // ErrNotEnoughReplicas Broker: Not enough in-sync replicas
-    ErrNotEnoughReplicas ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS)
-    // ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas
-    ErrNotEnoughReplicasAfterAppend ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND)
-    // ErrInvalidRequiredAcks Broker: Invalid required acks value
-    ErrInvalidRequiredAcks ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS)
-    // ErrIllegalGeneration Broker: Specified group generation id is not valid
-    ErrIllegalGeneration ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
-    // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol
-    ErrInconsistentGroupProtocol ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL)
-    // ErrInvalidGroupID Broker: Invalid group.id
-    ErrInvalidGroupID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID)
-    // ErrUnknownMemberID Broker: Unknown member
-    ErrUnknownMemberID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
-    // ErrInvalidSessionTimeout Broker: Invalid session timeout
-    ErrInvalidSessionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT)
-    // ErrRebalanceInProgress Broker: Group rebalance in progress
-    ErrRebalanceInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS)
-    // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid
-    ErrInvalidCommitOffsetSize ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE)
-    // ErrTopicAuthorizationFailed Broker: Topic authorization failed
-    ErrTopicAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
-    // ErrGroupAuthorizationFailed Broker: Group authorization failed
-    ErrGroupAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED)
-    // ErrClusterAuthorizationFailed Broker: Cluster authorization failed
-    ErrClusterAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED)
-    // ErrInvalidTimestamp Broker: Invalid timestamp
-    ErrInvalidTimestamp ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP)
-    // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism
-    ErrUnsupportedSaslMechanism ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM)
-    // ErrIllegalSaslState Broker: Request not valid in current SASL state
-    ErrIllegalSaslState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE)
-    // ErrUnsupportedVersion Broker: API version not supported
-    ErrUnsupportedVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION)
-    // ErrTopicAlreadyExists Broker: Topic already exists
-    ErrTopicAlreadyExists ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS)
-    // ErrInvalidPartitions Broker: Invalid number of partitions
-    ErrInvalidPartitions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS)
-    // ErrInvalidReplicationFactor Broker: Invalid replication factor
-    ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR)
-    // ErrInvalidReplicaAssignment Broker: Invalid replica assignment
-    ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT)
-    // ErrInvalidConfig Broker: Configuration is invalid
-    ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG)
-    // ErrNotController Broker: Not controller for cluster
-    ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER)
-    // ErrInvalidRequest Broker: Invalid request
-    ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST)
-    // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request
-    ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT)
-    // ErrPolicyViolation Broker: Policy violation
-    ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION)
-    // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number
-    ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER)
-    // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number
-    ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER)
-    // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch
-    ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH)
-    // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state
-    ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE)
-    // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id
-    ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING)
-    // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
-    ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT)
-    // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
-    ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS)
-    // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
-    ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED)
-    // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed
-    ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED)
-    // ErrSecurityDisabled Broker: Security features are disabled
-    ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED)
-    // ErrOperationNotAttempted Broker: Operation not attempted
-    ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED)
-    // ErrKafkaStorageError Broker: Disk error when trying to access log file on disk
-    ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR)
-    // ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config
-    ErrLogDirNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND)
-    // ErrSaslAuthenticationFailed Broker: SASL Authentication failed
-    ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED)
-    // ErrUnknownProducerID Broker: Unknown Producer Id
-    ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID)
-    // ErrReassignmentInProgress Broker: Partition reassignment is in progress
-    ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS)
-    // ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled
-    ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED)
-    // ErrDelegationTokenNotFound Broker: Delegation Token is not found on server
-    ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND)
-    // ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer
-    ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH)
-    // ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection
-    ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED)
-    // ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed
-    ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED)
-    // ErrDelegationTokenExpired Broker: Delegation Token is expired
-    ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED)
-    // ErrInvalidPrincipalType Broker: Supplied principalType is not supported
-    ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE)
-    // ErrNonEmptyGroup Broker: The group is not empty
-    ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP)
-    // ErrGroupIDNotFound Broker: The group id does not exist
-    ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND)
-    // ErrFetchSessionIDNotFound Broker: The fetch session ID was not found
-    ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND)
-    // ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid
-    ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH)
-    // ErrListenerNotFound Broker: No matching listener
-    ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND)
-    // ErrTopicDeletionDisabled Broker: Topic deletion is disabled
-    ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED)
-    // ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch
-    ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH)
-    // ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch
-    ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH)
-    // ErrUnsupportedCompressionType Broker: Unsupported compression type
-    ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE)
-    // ErrStaleBrokerEpoch Broker: Broker epoch has changed
-    ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH)
-    // ErrOffsetNotAvailable Broker: Leader high watermark is not caught up
-    ErrOffsetNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE)
-    // ErrMemberIDRequired Broker: Group member needs a valid member ID
-    ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED)
-    // ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available
-    ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE)
-    // ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size
-    ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED)
-    // ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id
-    ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID)
-    // ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available
-    ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE)
-    // ErrElectionNotNeeded Broker: Leader election not needed for topic partition
-    ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED)
-    // ErrNoReassignmentInProgress Broker: No partition reassignment is in progress
-    ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS)
-    // ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it
-    ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC)
-    // ErrInvalidRecord Broker: Broker failed to validate record
-    ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD)
-    // ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared
-    ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
-    // ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded
-    ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED)
-    // ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one
-    ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED)
-    // ErrResourceNotFound Broker: Request illegally referred to resource that does not exist
-    ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND)
-    // ErrDuplicateResource Broker: Request illegally referred to the same resource twice
-    ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE)
-    // ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability
-    ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL)
-    // ErrInconsistentVoterSet Broker: Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters
-    ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET)
-    // ErrInvalidUpdateVersion Broker: Invalid update version
-    ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION)
-    // ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error
-    ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED)
-    // ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding
-    ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE)
-)
-

- func (ErrorCode) String

-
func (c ErrorCode) String() string
-

- String returns a human readable representation of an error code

-
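
Each constant above maps one-to-one onto a librdkafka error code, so applications usually type-assert a returned error to `kafka.Error` and switch on `Code()` instead of matching message text. A minimal sketch; the handling policy in the comments is hypothetical:

```go
package example

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

// handleProduceError branches on the error code rather than on message text.
func handleProduceError(err error) {
	kErr, ok := err.(kafka.Error)
	if !ok {
		return
	}
	switch kErr.Code() {
	case kafka.ErrQueueFull:
		// Local producer queue is full: back off and retry later.
	case kafka.ErrMsgTimedOut:
		// Delivery timed out; retrying may produce duplicates.
	default:
		// Code().String() yields the librdkafka human-readable text.
		fmt.Printf("produce failed: %s (%v)\n", kErr.Code().String(), kErr)
	}
}
```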

- type Event

-

- Event generic interface

-
type Event interface {
-    // String returns a human-readable representation of the event
-    String() string
-}
-

- type Handle

-

- Handle represents a generic client handle containing common parts for both Producer and Consumer.

-
type Handle interface {
-    // SetOAuthBearerToken sets the the data to be transmitted
-    // to a broker during SASL/OAUTHBEARER authentication. It will return nil
-    // on success, otherwise an error if:
-    // 1) the token data is invalid (meaning an expiration time in the past
-    // or either a token value or an extension key or value that does not meet
-    // the regular expression requirements as per
-    // https://tools.ietf.org/html/rfc7628#section-3.1);
-    // 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
-    // 3) SASL/OAUTHBEARER is supported but is not configured as the client's
-    // authentication mechanism.
-    SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
-
-    // SetOAuthBearerTokenFailure sets the error message describing why token
-    // retrieval/setting failed; it also schedules a new token refresh event for 10
-    // seconds later so the attempt may be retried. It will return nil on
-    // success, otherwise an error if:
-    // 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
-    // 2) SASL/OAUTHBEARER is supported but is not configured as the client's
-    // authentication mechanism.
-    SetOAuthBearerTokenFailure(errstr string) error
-    // contains filtered or unexported methods
-}
-

- Header represents a single Kafka message header.

-

- Message headers are made up of a list of Header elements, retaining their original insert order and allowing for duplicate Keys.

-

- Key is a human readable string identifying the header. Value is the key's binary value; Kafka does not put any restrictions on the format of the Value, but it should be made relatively compact. The value may be a byte array, empty, or nil.

-

- NOTE: Message headers are not available on producer delivery report messages.

-
type Header struct {
-    Key   string // Header name (utf-8 string)
-    Value []byte // Header value (nil, empty, or binary)
-}
-
-

- func (Header) String

-
func (h Header) String() string
-

- String returns the Header Key and data in a human representable possibly truncated form suitable for displaying to the user.

-
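
A short sketch of how headers are attached in practice, illustrating the duplicate-key and nil-value rules above; the topic name and header contents are placeholders:

```go
package example

import "github.com/confluentinc/confluent-kafka-go/kafka"

// messageWithHeaders builds a message carrying headers; duplicate keys and
// nil values are both legal.
func messageWithHeaders(topic string) *kafka.Message {
	return &kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte(`{"metric":"up","value":1}`),
		Headers: []kafka.Header{
			{Key: "trace-id", Value: []byte("abc123")},
			{Key: "trace-id", Value: []byte("def456")}, // duplicate key, kept in insert order
			{Key: "flag", Value: nil},                  // nil value is allowed
		},
	}
}
```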

- type LogEvent

-

- LogEvent represents the log from librdkafka internal log queue

-
type LogEvent struct {
-    Name      string    // Name of client instance
-    Tag       string    // Log tag that provides context to the log Message (e.g., "METADATA" or "GRPCOORD")
-    Message   string    // Log message
-    Level     int       // Log syslog level, lower is more critical.
-    Timestamp time.Time // Log timestamp
-}
-
-

- func (LogEvent) String

-
func (logEvent LogEvent) String() string
-
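
A sketch of wiring the log queue to the application, assuming a broker at localhost:9092 (placeholder); note that go.logs.channel.enable must be set when the client is created:

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers":      "localhost:9092",
		"go.logs.channel.enable": true,
	})
	if err != nil {
		panic(err)
	}
	defer p.Close()

	go func() {
		for logEvent := range p.Logs() {
			// Level is syslog-style: lower numbers are more critical.
			fmt.Printf("[%d] %s %s: %s\n", logEvent.Level, logEvent.Tag, logEvent.Name, logEvent.Message)
		}
	}()
	// ... produce messages as usual ...
}
```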

- type Message

-

- Message represents a Kafka message

-
type Message struct {
-    TopicPartition TopicPartition
-    Value          []byte
-    Key            []byte
-    Timestamp      time.Time
-    TimestampType  TimestampType
-    Opaque         interface{}
-    Headers        []Header
-}
-
-

- func (*Message) String

-
func (m *Message) String() string
-

- String returns a human readable representation of a Message. Key and payload are not represented.

-
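
A minimal sketch of constructing a Message; the topic, key, and Opaque value are placeholders:

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	topic := "metrics" // placeholder topic name
	msg := &kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Key:            []byte("host-1"), // optional partitioning key
		Value:          []byte(`{"up":1}`),
		Opaque:         "request-42", // handed back untouched with the delivery report
	}
	// Key and payload are deliberately omitted from String().
	fmt.Println(msg.String())
}
```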

- type Metadata

-

- Metadata contains broker and topic metadata for all (matching) topics

-
type Metadata struct {
-    Brokers []BrokerMetadata
-    Topics  map[string]TopicMetadata
-
-    OriginatingBroker BrokerMetadata
-}
-
-

- type MockCluster

-

- MockCluster represents a Kafka mock cluster instance which can be used for testing.

-
type MockCluster struct {
-    // contains filtered or unexported fields
-}
-
-

- func NewMockCluster

-
func NewMockCluster(brokerCount int) (*MockCluster, error)
-

- NewMockCluster provides a mock Kafka cluster with a configurable number of brokers that support a reasonable subset of Kafka protocol operations, error injection, etc.

-

- Mock clusters provide localhost listeners that can be used as the bootstrap servers by multiple Kafka client instances.

-

- Currently supported functionality:
- - Producer
- - Idempotent Producer
- - Transactional Producer
- - Low-level consumer
- - High-level balanced consumer groups with offset commits
- - Topic Metadata and auto creation

-

- Warning: THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.

-

- func (*MockCluster) BootstrapServers

-
func (mc *MockCluster) BootstrapServers() string
-

- BootstrapServers returns the bootstrap.servers property for this MockCluster

-

- func (*MockCluster) Close

-
func (mc *MockCluster) Close()
-

- Close and destroy the MockCluster

-
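
A sketch of a test using the mock cluster: bring up one broker, point a regular producer at its listeners, and tear everything down afterwards:

```go
package main

import "github.com/confluentinc/confluent-kafka-go/kafka"

func main() {
	mc, err := kafka.NewMockCluster(1)
	if err != nil {
		panic(err)
	}
	defer mc.Close()

	// Point an ordinary client at the mock listeners.
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": mc.BootstrapServers(),
	})
	if err != nil {
		panic(err)
	}
	defer p.Close()
	// ... produce and assert as usual ...
}
```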

- type OAuthBearerToken

-

- OAuthBearerToken represents the data to be transmitted to a broker during SASL/OAUTHBEARER authentication.

-
type OAuthBearerToken struct {
-    // Token value, often (but not necessarily) a JWS compact serialization
-    // as per https://tools.ietf.org/html/rfc7515#section-3.1; it must meet
-    // the regular expression for a SASL/OAUTHBEARER value defined at
-    // https://tools.ietf.org/html/rfc7628#section-3.1
-    TokenValue string
-    // Metadata about the token indicating when it expires (local time);
-    // it must represent a time in the future
-    Expiration time.Time
-    // Metadata about the token indicating the Kafka principal name
-    // to which it applies (for example, "admin")
-    Principal string
-    // SASL extensions, if any, to be communicated to the broker during
-    // authentication (all keys and values of which must meet the regular
-    // expressions defined at https://tools.ietf.org/html/rfc7628#section-3.1,
-    // and it must not contain the reserved "auth" key)
-    Extensions map[string]string
-}
-
-
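
A sketch of handing a freshly retrieved token to a client (here a Producer, which satisfies Handle); the raw token, principal, and lifetime are placeholders:

```go
package example

import (
	"time"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

// applyToken installs a new SASL/OAUTHBEARER token on the client.
func applyToken(p *kafka.Producer, raw string) {
	token := kafka.OAuthBearerToken{
		TokenValue: raw,                               // e.g. a JWS compact serialization
		Expiration: time.Now().Add(55 * time.Minute),  // must be in the future
		Principal:  "admin",                           // placeholder principal
	}
	if err := p.SetOAuthBearerToken(token); err != nil {
		// Schedules a refresh retry ~10 seconds later (see Handle docs above).
		p.SetOAuthBearerTokenFailure(err.Error())
	}
}
```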

- type OAuthBearerTokenRefresh

-

- OAuthBearerTokenRefresh indicates token refresh is required

-
type OAuthBearerTokenRefresh struct {
-    // Config is the value of the sasl.oauthbearer.config property
-    Config string
-}
-
-

- func (OAuthBearerTokenRefresh) String

-
func (o OAuthBearerTokenRefresh) String() string
-

- type Offset

-

- Offset type (int64) with support for canonical names

-
type Offset int64
-

- func NewOffset

-
func NewOffset(offset interface{}) (Offset, error)
-

- NewOffset creates a new Offset using the provided logical string, or an absolute int64 offset value. Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored"

-

- func OffsetTail

-
func OffsetTail(relativeOffset Offset) Offset
-

- OffsetTail returns the logical offset relativeOffset from current end of partition

-
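
A small sketch showing the three ways an Offset is usually obtained; the values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	begin, _ := kafka.NewOffset("beginning") // logical name
	abs, _ := kafka.NewOffset(int64(42))     // absolute position
	tail := kafka.OffsetTail(100)            // 100 messages before the current end

	fmt.Println(begin, abs, tail)
}
```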

- func (*Offset) Set

-
func (o *Offset) Set(offset interface{}) error
-

- Set offset value, see NewOffset()

-

- func (Offset) String

-
func (o Offset) String() string
-

- type OffsetsCommitted

-

- OffsetsCommitted reports committed offsets

-
type OffsetsCommitted struct {
-    Error   error
-    Offsets []TopicPartition
-}
-
-

- func (OffsetsCommitted) String

-
func (o OffsetsCommitted) String() string
-

- type PartitionEOF

-

- PartitionEOF consumer reached end of partition. Needs to be explicitly enabled by setting the `enable.partition.eof` configuration property to true.

-
type PartitionEOF TopicPartition
-

- func (PartitionEOF) String

-
func (p PartitionEOF) String() string
-

- type PartitionMetadata

-

- PartitionMetadata contains per-partition metadata

-
type PartitionMetadata struct {
-    ID       int32
-    Error    Error
-    Leader   int32
-    Replicas []int32
-    Isrs     []int32
-}
-
-

- type PartitionsSpecification

-

- PartitionsSpecification holds parameters for creating additional partitions for a topic. PartitionsSpecification is analogous to NewPartitions in the Java Topic Admin API.

-
type PartitionsSpecification struct {
-    // Topic to create more partitions for.
-    Topic string
-    // New partition count for topic, must be higher than current partition count.
-    IncreaseTo int
-    // (Optional) Explicit replica assignment. The outer array is
-    // indexed by the new partition index (i.e., 0 for the first added
-    // partition), while the inner per-partition array
-    // contains the replica broker ids. The first broker in each
-    // broker id list will be the preferred replica.
-    ReplicaAssignment [][]int32
-}
-
-

- type Producer

-

- Producer implements a High-level Apache Kafka Producer instance

-
type Producer struct {
-    // contains filtered or unexported fields
-}
-
-

- func NewProducer

-
func NewProducer(conf *ConfigMap) (*Producer, error)
-

- NewProducer creates a new high-level Producer instance.

-

- conf is a *ConfigMap with standard librdkafka configuration properties.

-

- Supported special configuration properties (type, default), with a configuration sketch after the list:

-
go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance).
-                                  These batches do not relate to Kafka message batches in any way.
-                                  Note: timestamps and headers are not supported with this interface.
-go.delivery.reports (bool, true) - Forward per-message delivery reports to the
-                                   Events() channel.
-go.delivery.report.fields (string, "key,value") - Comma separated list of fields to enable for delivery reports.
-                                    Allowed values: all, none (or empty string), key, value, headers
-                                    Warning: There is a performance penalty to include headers in the delivery report.
-go.events.channel.size (int, 1000000) - Events() channel size (in number of events)
-go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages)
-go.logs.channel.enable (bool, false) - Forward log to Logs() channel.
-go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.
-
-
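
A configuration sketch mixing a standard librdkafka property with some of the go.* specials above; the broker address and channel sizes are placeholders:

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers":         "localhost:9092", // standard librdkafka property
		"go.delivery.reports":       true,             // forward per-message reports to Events()
		"go.delivery.report.fields": "key,value",
		"go.events.channel.size":    100000,
		"go.produce.channel.size":   100000,
	})
	if err != nil {
		panic(err)
	}
	defer p.Close()
	fmt.Println("producer:", p.String())
}
```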

- func (*Producer) AbortTransaction

-
func (p *Producer) AbortTransaction(ctx context.Context) error
-

- AbortTransaction aborts the ongoing transaction.

-

- This function should also be used to recover from non-fatal abortable transaction errors.

-

- Any outstanding messages will be purged and fail with `ErrPurgeInflight` or `ErrPurgeQueue`.

-

- Parameters:

-
* `ctx` - The maximum amount of time to block, or nil for indefinite.
-
-

- Note: This function will block until all outstanding messages are purged and the transaction abort request has been successfully handled by the transaction coordinator, or until the `ctx` expires, whichever comes first. On timeout the application may call the function again.

-

- Note: Will automatically call `Purge()` and `Flush()` to ensure all queued and in-flight messages are purged before attempting to abort the transaction. The application MUST serve the `producer.Events()` channel for delivery reports in a separate go-routine during this time.

-

- Returns nil on success or an error object on failure. Check whether the returned error object permits retrying by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal error has been raised by calling `err.(kafka.Error).IsFatal()`.

-

- func (*Producer) BeginTransaction

-
func (p *Producer) BeginTransaction() error
-

- BeginTransaction starts a new transaction.

-

- `InitTransactions()` must have been called successfully (once) before this function is called.

-

- Any messages produced, offsets sent (`SendOffsetsToTransaction()`), etc, after the successful return of this function will be part of the transaction and committed or aborted atomically.

-

- Finish the transaction by calling `CommitTransaction()` or abort the transaction by calling `AbortTransaction()`.

-

- Returns nil on success or an error object on failure. Check whether a fatal error has been raised by calling `err.(kafka.Error).IsFatal()`.

-

- Note: With the transactional producer, `Produce()`, et al., are only allowed during an on-going transaction, as started with this function. Any produce call outside an on-going transaction, or for a failed transaction, will fail.

-

- func (*Producer) Close

-
func (p *Producer) Close()
-

- Close a Producer instance. The Producer object or its channels are no longer usable after this call.

-

- func (*Producer) CommitTransaction

-
func (p *Producer) CommitTransaction(ctx context.Context) error
-

- CommitTransaction commits the current transaction.

-

- Any outstanding messages will be flushed (delivered) before actually committing the transaction.

-

- If any of the outstanding messages fail permanently the current transaction will enter the abortable error state and this function will return an abortable error; in this case the application must call `AbortTransaction()` before attempting a new transaction with `BeginTransaction()`.

-

- Parameters:

-
* `ctx` - The maximum amount of time to block, or nil for indefinite.
-
-

- Note: This function will block until all outstanding messages are delivered and the transaction commit request has been successfully handled by the transaction coordinator, or until the `ctx` expires, whichever comes first. On timeout the application may call the function again.

-

- Note: Will automatically call `Flush()` to ensure all queued messages are delivered before attempting to commit the transaction. The application MUST serve the `producer.Events()` channel for delivery reports in a separate go-routine during this time.

-

- Returns nil on success or an error object on failure. Check whether the returned error object permits retrying by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable or fatal error has been raised by calling `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()` respectively.

-

- func (*Producer) Events

-
func (p *Producer) Events() chan Event
-

- Events returns the Events channel (read)

-

- func (*Producer) Flush

-
func (p *Producer) Flush(timeoutMs int) int
-

- Flush and wait for outstanding messages and requests to complete delivery. Includes messages on ProduceChannel. Runs until value reaches zero or on timeoutMs. Returns the number of outstanding events still un-flushed.

-
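
A common shutdown sketch built on Flush: loop in bounded rounds until no events remain, then close; the 5-second round length is a placeholder:

```go
package example

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

// drainAndClose flushes in 5-second rounds until no events remain, then closes.
func drainAndClose(p *kafka.Producer) {
	for p.Flush(5 * 1000) > 0 {
		fmt.Println("still waiting for outstanding deliveries")
	}
	p.Close()
}
```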

- func (*Producer) GetFatalError

-
func (p *Producer) GetFatalError() error
-

- GetFatalError returns an Error object if the client instance has raised a fatal error, else nil.

-

- func (*Producer) GetMetadata

-
func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
-

- GetMetadata queries broker for cluster and topic metadata. If topic is non-nil only information about that topic is returned, else if allTopics is false only information about locally used topics is returned, else information about all topics is returned. GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.

-

- func (*Producer) InitTransactions

-
func (p *Producer) InitTransactions(ctx context.Context) error
-

- InitTransactions initializes transactions for the producer instance.

-

- This function ensures any transactions initiated by previous instances of the producer with the same `transactional.id` are completed. If the previous instance failed with a transaction in progress the previous transaction will be aborted. This function needs to be called before any other transactional or produce functions are called when the `transactional.id` is configured.

-

- If the last transaction had begun completion (following transaction commit) but not yet finished, this function will await the previous transaction's completion.

-

- When any previous transactions have been fenced this function will acquire the internal producer id and epoch, used in all future transactional messages issued by this producer instance.

-

- Upon successful return from this function the application has to perform at least one of the following operations within `transaction.timeout.ms` to avoid timing out the transaction on the broker:

-
* `Produce()` (et al.)
-* `SendOffsetsToTransaction()`
-* `CommitTransaction()`
-* `AbortTransaction()`
-
-

- Parameters:

-
* `ctx` - The maximum time to block, or nil for indefinite.
-          On timeout the operation may continue in the background,
-          depending on state, and it is okay to call `InitTransactions()`
-          again.
-
-

- Returns nil on success or an error on failure. Check whether the returned error object permits retrying by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal error has been raised by calling `err.(kafka.Error).IsFatal()`.

-
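
A sketch of the full transactional call sequence (InitTransactions, BeginTransaction, CommitTransaction, AbortTransaction, all documented in this section); the error policy is simplified and real code must also serve producer.Events() while committing:

```go
package example

import (
	"context"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

// produceInTransaction wraps a batch of messages in one transaction.
// transactional.id must be configured on the producer.
func produceInTransaction(ctx context.Context, p *kafka.Producer, msgs []*kafka.Message) error {
	// InitTransactions is needed once per producer instance.
	if err := p.InitTransactions(ctx); err != nil {
		return err
	}
	if err := p.BeginTransaction(); err != nil {
		return err
	}
	for _, m := range msgs {
		if err := p.Produce(m, nil); err != nil {
			_ = p.AbortTransaction(ctx)
			return err
		}
	}
	if err := p.CommitTransaction(ctx); err != nil {
		if kErr, ok := err.(kafka.Error); ok && kErr.TxnRequiresAbort() {
			return p.AbortTransaction(ctx)
		}
		return err
	}
	return nil
}
```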

- func (*Producer) Len

-
func (p *Producer) Len() int
-

- Len returns the number of messages and requests waiting to be transmitted to the broker as well as delivery reports queued for the application. Includes messages on ProduceChannel.

-

- func (*Producer) Logs

-
func (p *Producer) Logs() chan LogEvent
-

- Logs returns the Log channel (if enabled), else nil

-

- func (*Producer) OffsetsForTimes

-
func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
-

- OffsetsForTimes looks up offsets by timestamp for the given partitions.

-

- The returned offset for each partition is the earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition. If the provided timestamp exceeds that of the last message in the partition, a value of -1 will be returned.

-

- The timestamps to query are represented as `.Offset` in the `times` argument and the looked up offsets are represented as `.Offset` in the returned `offsets` list.

-

- The function will block for at most timeoutMs milliseconds.

-

- Duplicate Topic+Partitions are not supported. Per-partition errors may be returned in the `.Error` field.

-
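
A sketch resolving "first offset at or after one hour ago" for a single partition; broker address, topic, and timeout are placeholders:

```go
package main

import (
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		panic(err)
	}
	defer p.Close()

	topic := "metrics"
	ts := time.Now().Add(-time.Hour).UnixMilli()
	times := []kafka.TopicPartition{
		// The timestamp to look up is passed in the Offset field.
		{Topic: &topic, Partition: 0, Offset: kafka.Offset(ts)},
	}
	offsets, err := p.OffsetsForTimes(times, 5000)
	if err != nil {
		panic(err)
	}
	fmt.Println(offsets[0].Offset) // earliest offset with timestamp >= ts, or -1
}
```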

- func (*Producer) Produce

-
func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error
-

- Produce single message. This is an asynchronous call that enqueues the message on the internal transmit queue, thus returning immediately. The delivery report will be sent on the provided deliveryChan if specified, or on the Producer object's Events() channel if not. msg.Timestamp requires librdkafka >= 0.9.4 (else returns ErrNotImplemented), api.version.request=true, and broker >= 0.10.0.0. msg.Headers requires librdkafka >= 0.11.4 (else returns ErrNotImplemented), api.version.request=true, and broker >= 0.11.0.0. Returns an error if message could not be enqueued.

-
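
A sketch of a single produce call with a private delivery channel; broker address and topic are placeholders:

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		panic(err)
	}
	defer p.Close()

	topic := "metrics"
	deliveryChan := make(chan kafka.Event, 1)
	err = p.Produce(&kafka.Message{
		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
		Value:          []byte("payload"),
	}, deliveryChan)
	if err != nil {
		panic(err) // enqueue failed, e.g. ErrQueueFull
	}

	// The delivery report arrives as a *kafka.Message on the channel.
	m := (<-deliveryChan).(*kafka.Message)
	if m.TopicPartition.Error != nil {
		fmt.Println("delivery failed:", m.TopicPartition.Error)
	}
}
```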

- func (*Producer) ProduceChannel

-
func (p *Producer) ProduceChannel() chan *Message
-

- ProduceChannel returns the produce *Message channel (write)

-

- func (*Producer) Purge

-
func (p *Producer) Purge(flags int) error
-

- Purge messages currently handled by this producer instance.

-

- flags is a combination of PurgeQueue, PurgeInFlight and PurgeNonBlocking.

-

- The application will need to call Poll(), Flush() or read the Events() channel after this call to serve delivery reports for the purged messages.

-

- Messages purged from internal queues fail with the delivery report error code set to ErrPurgeQueue, while purged messages that are in-flight to or from the broker will fail with the error code set to ErrPurgeInflight.

-

- Warning: Purging messages that are in-flight to or from the broker will ignore any subsequent acknowledgement for these messages received from the broker, effectively making it impossible for the application to know if the messages were successfully produced or not. This may result in duplicate messages if the application retries these messages at a later time.

-

- Note: This call may block for a short time while background thread queues are purged.

-

- Returns nil on success, ErrInvalidArg if the purge flags are invalid or unknown.

-
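
A sketch of a forced shutdown built on Purge; the flag choice and flush timeout are placeholders:

```go
package example

import "github.com/confluentinc/confluent-kafka-go/kafka"

// forceShutdown abandons queued and in-flight messages, then serves the
// resulting delivery reports before closing.
func forceShutdown(p *kafka.Producer) error {
	if err := p.Purge(kafka.PurgeQueue | kafka.PurgeInFlight); err != nil {
		return err // ErrInvalidArg for unknown flags
	}
	// Purged messages still yield delivery reports (ErrPurgeQueue /
	// ErrPurgeInflight), so flush them through before closing.
	p.Flush(1000)
	p.Close()
	return nil
}
```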

- func (*Producer) QueryWatermarkOffsets

-
func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
-

- QueryWatermarkOffsets returns the broker's low and high offsets for the given topic and partition.

-

- func (*Producer) SendOffsetsToTransaction

-
func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error
-

- SendOffsetsToTransaction sends a list of topic partition offsets to the consumer group coordinator for `consumerMetadata`, and marks the offsets as part of the current transaction. These offsets will be considered committed only if the transaction is committed successfully.

-

- The offsets should be the next message your application will consume, i.e., the last processed message's offset + 1 for each partition. Either track the offsets manually during processing or use `consumer.Position()` (on the consumer) to get the current offsets for the partitions assigned to the consumer.

-

- Use this method at the end of a consume-transform-produce loop prior to committing the transaction with `CommitTransaction()`.

-

- Parameters:

-
* `ctx` - The maximum amount of time to block, or nil for indefinite.
-* `offsets` - List of offsets to commit to the consumer group upon
-              successful commit of the transaction. Offsets should be
-              the next message to consume, e.g., last processed message + 1.
-* `consumerMetadata` - The current consumer group metadata as returned by
-              `consumer.GetConsumerGroupMetadata()` on the consumer
-              instance the provided offsets were consumed from.
-
-

- Note: The consumer must disable auto commits (set `enable.auto.commit` to false on the consumer).

-

- Note: Logical and invalid offsets (e.g., OffsetInvalid) in `offsets` will be ignored. If there are no valid offsets in `offsets` the function will return nil and no action will be taken.

-

- Returns nil on success or an error object on failure. Check whether the returned error object permits retrying by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable or fatal error has been raised by calling `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()` respectively.

-
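
A sketch of the tail end of one consume-transform-produce cycle, combining Position(), GetConsumerGroupMetadata(), and CommitTransaction() as described above; it assumes enable.auto.commit=false on the consumer and an already-begun transaction:

```go
package example

import (
	"context"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

// commitCycle sends the consumer's current positions into the open
// transaction, then commits it.
func commitCycle(ctx context.Context, p *kafka.Producer, c *kafka.Consumer) error {
	parts, err := c.Assignment()
	if err != nil {
		return err
	}
	// Position() yields last-processed+1 per assigned partition.
	offsets, err := c.Position(parts)
	if err != nil {
		return err
	}
	meta, err := c.GetConsumerGroupMetadata()
	if err != nil {
		return err
	}
	if err := p.SendOffsetsToTransaction(ctx, offsets, meta); err != nil {
		return err
	}
	return p.CommitTransaction(ctx)
}
```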

- func (*Producer) SetOAuthBearerToken

-
func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
-

- SetOAuthBearerToken sets the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil on success, otherwise an error if: 1) the token data is invalid (meaning an expiration time in the past or either a token value or an extension key or value that does not meet the regular expression requirements as per https://tools.ietf.org/html/rfc7628#section-3.1); 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 3) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism.

-

- func (*Producer) SetOAuthBearerTokenFailure

-
func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error
-

- SetOAuthBearerTokenFailure sets the error message describing why token retrieval/setting failed; it also schedules a new token refresh event for 10 seconds later so the attempt may be retried. It will return nil on success, otherwise an error if: 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 2) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism.

-

- func (*Producer) String

-
func (p *Producer) String() string
-

- String returns a human readable name for a Producer instance

-

- func (*Producer) TestFatalError

-
func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode
-

- TestFatalError triggers a fatal error in the underlying client. This is to be used strictly for testing purposes.

-

- type RebalanceCb

-

- RebalanceCb provides a per-Subscribe*() rebalance event callback. The passed Event will be either AssignedPartitions or RevokedPartitions

-
type RebalanceCb func(*Consumer, Event) error
-

- type ResourcePatternType

-

- ResourcePatternType enumerates the different types of Kafka resource patterns.

-
type ResourcePatternType int
-

- func ResourcePatternTypeFromString

-
func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error)
-

- ResourcePatternTypeFromString translates a resource pattern type name to a ResourcePatternType value.

-

- func (ResourcePatternType) String

-
func (t ResourcePatternType) String() string
-

- String returns the human-readable representation of a ResourcePatternType

-

- type ResourceType

-

- ResourceType represents an Apache Kafka resource type

-
type ResourceType int
-

- func ResourceTypeFromString

-
func ResourceTypeFromString(typeString string) (ResourceType, error)
-

- ResourceTypeFromString translates a resource type name/string to a ResourceType value.

-

- func (ResourceType) String

-
func (t ResourceType) String() string
-

- String returns the human-readable representation of a ResourceType

-

- type RevokedPartitions

-

- RevokedPartitions consumer group rebalance event: revoked partition set

-
type RevokedPartitions struct {
-    Partitions []TopicPartition
-}
-
-

- func (RevokedPartitions) String

-
func (e RevokedPartitions) String() string
-

- type Stats

-

- Stats statistics event

-
type Stats struct {
-    // contains filtered or unexported fields
-}
-
-

- func (Stats) String

-
func (e Stats) String() string
-

- type TimestampType

-

- TimestampType is the Message timestamp type or source

-
type TimestampType int
-

- func (TimestampType) String

-
func (t TimestampType) String() string
-

- type TopicMetadata

-

- TopicMetadata contains per-topic metadata

-
type TopicMetadata struct {
-    Topic      string
-    Partitions []PartitionMetadata
-    Error      Error
-}
-
-

- type TopicPartition

-

- TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset.

-
type TopicPartition struct {
-    Topic     *string
-    Partition int32
-    Offset    Offset
-    Metadata  *string
-    Error     error
-}
-
-

- func (TopicPartition) String

-
func (p TopicPartition) String() string
-

- type TopicPartitions

-

- TopicPartitions is a slice of TopicPartitions that also implements the sort interface

-
type TopicPartitions []TopicPartition
-

- func (TopicPartitions) Len

-
func (tps TopicPartitions) Len() int
-

- func (TopicPartitions) Less

-
func (tps TopicPartitions) Less(i, j int) bool
-

- func (TopicPartitions) Swap

-
func (tps TopicPartitions) Swap(i, j int)
-
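
Because Len/Less/Swap above satisfy sort.Interface, partition lists can be ordered with the standard library; a tiny sketch with placeholder topics:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	a, b := "alpha", "beta"
	parts := kafka.TopicPartitions{
		{Topic: &b, Partition: 1},
		{Topic: &a, Partition: 2},
		{Topic: &a, Partition: 0},
	}
	sort.Sort(parts) // orders by topic, then partition
	fmt.Println(parts)
}
```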

- type TopicResult

-

- TopicResult provides per-topic operation result (error) information.

-
type TopicResult struct {
-    // Topic name
-    Topic string
-    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
-    Error Error
-}
-
-

- func (TopicResult) String

-
func (t TopicResult) String() string
-

- String returns a human-readable representation of a TopicResult.

-

- type TopicSpecification

-

- TopicSpecification holds parameters for creating a new topic. TopicSpecification is analogous to NewTopic in the Java Topic Admin API.

-
type TopicSpecification struct {
-    // Topic name to create.
-    Topic string
-    // Number of partitions in topic.
-    NumPartitions int
-    // Default replication factor for the topic's partitions, or zero
-    // if an explicit ReplicaAssignment is set.
-    ReplicationFactor int
-    // (Optional) Explicit replica assignment. The outer array is
-    // indexed by the partition number, while the inner per-partition array
-    // contains the replica broker ids. The first broker in each
-    // broker id list will be the preferred replica.
-    ReplicaAssignment [][]int32
-    // Topic configuration.
-    Config map[string]string
-}
-
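
A sketch of feeding a TopicSpecification to the AdminClient's CreateTopics call; broker address, topic name, and sizing are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	admin, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		panic(err)
	}
	defer admin.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	results, err := admin.CreateTopics(ctx, []kafka.TopicSpecification{{
		Topic:             "metrics",
		NumPartitions:     3,
		ReplicationFactor: 1,
		Config:            map[string]string{"retention.ms": "86400000"},
	}})
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		// Per-topic errors: check TopicResult.Error.Code() != ErrNoError (see TopicResult above).
		fmt.Println(r.String())
	}
}
```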
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go
deleted file mode 100644
index d15e8f5e..00000000
--- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/generated_errors.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package kafka
-// Copyright 2016-2022 Confluent Inc.
-// AUTOMATICALLY GENERATED ON 2022-08-01 22:56:19.86222475 +0200 CEST m=+0.000294735 USING librdkafka 1.9.2
-
-/*
-#include "select_rdkafka.h"
-*/
-import "C"
-
-// ErrorCode is the integer representation of local and broker error codes
-type ErrorCode int
-
-// String returns a human readable representation of an error code
-func (c ErrorCode) String() string {
-	return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c)))
-}
-
-const (
-	// ErrBadMsg Local: Bad message format
-	ErrBadMsg ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR__BAD_MSG)
replication factor - ErrInvalidReplicationFactor ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR) - // ErrInvalidReplicaAssignment Broker: Invalid replica assignment - ErrInvalidReplicaAssignment ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT) - // ErrInvalidConfig Broker: Configuration is invalid - ErrInvalidConfig ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_CONFIG) - // ErrNotController Broker: Not controller for cluster - ErrNotController ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER) - // ErrInvalidRequest Broker: Invalid request - ErrInvalidRequest ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_REQUEST) - // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request - ErrUnsupportedForMessageFormat ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT) - // ErrPolicyViolation Broker: Policy violation - ErrPolicyViolation ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION) - // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number - ErrOutOfOrderSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER) - // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number - ErrDuplicateSequenceNumber ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER) - // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch - ErrInvalidProducerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH) - // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state - ErrInvalidTxnState ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE) - // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id - ErrInvalidProducerIDMapping ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING) - // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms - ErrInvalidTransactionTimeout ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT) - // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing - ErrConcurrentTransactions ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS) - // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer - ErrTransactionCoordinatorFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED) - // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed - ErrTransactionalIDAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED) - // ErrSecurityDisabled Broker: Security features are disabled - ErrSecurityDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED) - // ErrOperationNotAttempted Broker: Operation not attempted - ErrOperationNotAttempted ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED) - // ErrKafkaStorageError Broker: Disk error when trying to access log file on disk - ErrKafkaStorageError ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR) - // ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config - ErrLogDirNotFound 
ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND) - // ErrSaslAuthenticationFailed Broker: SASL Authentication failed - ErrSaslAuthenticationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED) - // ErrUnknownProducerID Broker: Unknown Producer Id - ErrUnknownProducerID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID) - // ErrReassignmentInProgress Broker: Partition reassignment is in progress - ErrReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS) - // ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled - ErrDelegationTokenAuthDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED) - // ErrDelegationTokenNotFound Broker: Delegation Token is not found on server - ErrDelegationTokenNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND) - // ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer - ErrDelegationTokenOwnerMismatch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH) - // ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection - ErrDelegationTokenRequestNotAllowed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED) - // ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed - ErrDelegationTokenAuthorizationFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED) - // ErrDelegationTokenExpired Broker: Delegation Token is expired - ErrDelegationTokenExpired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED) - // ErrInvalidPrincipalType Broker: Supplied principalType is not supported - ErrInvalidPrincipalType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE) - // ErrNonEmptyGroup Broker: The group is not empty - ErrNonEmptyGroup ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP) - // ErrGroupIDNotFound Broker: The group id does not exist - ErrGroupIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND) - // ErrFetchSessionIDNotFound Broker: The fetch session ID was not found - ErrFetchSessionIDNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND) - // ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid - ErrInvalidFetchSessionEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH) - // ErrListenerNotFound Broker: No matching listener - ErrListenerNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND) - // ErrTopicDeletionDisabled Broker: Topic deletion is disabled - ErrTopicDeletionDisabled ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED) - // ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch - ErrFencedLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH) - // ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch - ErrUnknownLeaderEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH) - // ErrUnsupportedCompressionType Broker: Unsupported compression type - ErrUnsupportedCompressionType ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE) - // ErrStaleBrokerEpoch Broker: Broker epoch has changed - ErrStaleBrokerEpoch ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH) - // ErrOffsetNotAvailable Broker: Leader high watermark is not caught up - ErrOffsetNotAvailable ErrorCode = 
ErrorCode(C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE) - // ErrMemberIDRequired Broker: Group member needs a valid member ID - ErrMemberIDRequired ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED) - // ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available - ErrPreferredLeaderNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE) - // ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size - ErrGroupMaxSizeReached ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED) - // ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id - ErrFencedInstanceID ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID) - // ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available - ErrEligibleLeadersNotAvailable ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE) - // ErrElectionNotNeeded Broker: Leader election not needed for topic partition - ErrElectionNotNeeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED) - // ErrNoReassignmentInProgress Broker: No partition reassignment is in progress - ErrNoReassignmentInProgress ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS) - // ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it - ErrGroupSubscribedToTopic ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC) - // ErrInvalidRecord Broker: Broker failed to validate record - ErrInvalidRecord ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_RECORD) - // ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared - ErrUnstableOffsetCommit ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) - // ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded - ErrThrottlingQuotaExceeded ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED) - // ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one - ErrProducerFenced ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED) - // ErrResourceNotFound Broker: Request illegally referred to resource that does not exist - ErrResourceNotFound ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND) - // ErrDuplicateResource Broker: Request illegally referred to the same resource twice - ErrDuplicateResource ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE) - // ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability - ErrUnacceptableCredential ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL) - // ErrInconsistentVoterSet Broker: Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters - ErrInconsistentVoterSet ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET) - // ErrInvalidUpdateVersion Broker: Invalid update version - ErrInvalidUpdateVersion ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION) - // ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error - ErrFeatureUpdateFailed ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED) - // ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding - ErrPrincipalDeserializationFailure ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE) -) diff --git 
a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a deleted file mode 100644 index a49b5fae..00000000 Binary files a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a and /dev/null differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a deleted file mode 100644 index 85aa5482..00000000 Binary files a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a and /dev/null differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_windows.a b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_windows.a deleted file mode 100644 index 4761207a..00000000 Binary files a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_windows.a and /dev/null differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go b/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go deleted file mode 100644 index fe2dddc5..00000000 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testhelpers.go +++ /dev/null @@ -1,248 +0,0 @@ -package kafka - -/** - * Copyright 2016 Confluent Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import ( - "context" - "encoding/json" - "fmt" - "math/rand" - "os" - "testing" - "time" -) - -/* -#include "select_rdkafka.h" -*/ -import "C" - -var testconf struct { - Brokers string - Topic string - GroupID string - PerfMsgCount int - PerfMsgSize int - Config []string - conf ConfigMap -} - -// testconf_read reads the test suite config file testconf.json which must -// contain at least Brokers and Topic string properties. -// Returns true if the testconf was found and usable, false if no such file, or panics -// if the file format is wrong. 
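Editor's aside: the hunks around here delete the vendored v1 tree (including the generated `ErrorCode` table above), and the rename hunks further down re-vendor it under the `/v2/` module path. A minimal sketch of what the path change means for consuming code, using only APIs visible in this diff (`NewError`, `LibraryVersion`); the error scenario and message are illustrative, not part of this PR:

```go
package main

import (
	"fmt"

	// v1 path (deleted above): github.com/confluentinc/confluent-kafka-go/kafka
	// v2 path (vendored below): note the added /v2/ module suffix.
	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	// The ErrorCode constants whose v1 copies are deleted above are
	// regenerated under v2 with the same names, so checks like this
	// compile against either major version.
	err := kafka.NewError(kafka.ErrMsgTimedOut, "illustrative delivery failure", false)
	if err.Code() == kafka.ErrMsgTimedOut {
		fmt.Println("retryable:", err)
	}

	num, str := kafka.LibraryVersion()
	fmt.Printf("built against librdkafka %s (0x%x)\n", str, num)
}
```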
-func testconfRead() bool { - cf, err := os.Open("testconf.json") - if err != nil { - fmt.Fprintf(os.Stderr, "%% testconf.json not found - ignoring test\n") - return false - } - - // Default values - testconf.PerfMsgCount = 2000000 - testconf.PerfMsgSize = 100 - testconf.GroupID = "testgroup" - - jp := json.NewDecoder(cf) - err = jp.Decode(&testconf) - if err != nil { - panic(fmt.Sprintf("Failed to parse testconf: %s", err)) - } - - cf.Close() - - if testconf.Brokers[0] == '$' { - // Read broker list from environment variable - testconf.Brokers = os.Getenv(testconf.Brokers[1:]) - } - - if testconf.Brokers == "" || testconf.Topic == "" { - panic("Missing Brokers or Topic in testconf.json") - } - - return true -} - -// update existing ConfigMap with key=value pairs from testconf.SerializerConfig -func (cm *ConfigMap) updateFromTestconf() error { - if testconf.Config == nil { - return nil - } - - // Translate "key=value" pairs in SerializerConfig to ConfigMap - for _, s := range testconf.Config { - err := cm.Set(s) - if err != nil { - return err - } - } - - return nil - -} - -// Return the number of messages available in all partitions of a topic. -// WARNING: This uses watermark offsets so it will be incorrect for compacted topics. -func getMessageCountInTopic(topic string) (int, error) { - - // Create consumer - config := &ConfigMap{"bootstrap.servers": testconf.Brokers, - "group.id": testconf.GroupID} - config.updateFromTestconf() - - c, err := NewConsumer(config) - if err != nil { - return 0, err - } - defer c.Close() - - // get metadata for the topic to find out number of partitions - - metadata, err := c.GetMetadata(&topic, false, 5*1000) - if err != nil { - return 0, err - } - - t, ok := metadata.Topics[topic] - if !ok { - return 0, newError(C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) - } - - cnt := 0 - for _, p := range t.Partitions { - low, high, err := c.QueryWatermarkOffsets(topic, p.ID, 5*1000) - if err != nil { - continue - } - cnt += int(high - low) - } - - return cnt, nil -} - -// getBrokerList returns a list of brokers (ids) in the cluster -func getBrokerList(H Handle) (brokers []int32, err error) { - md, err := getMetadata(H, nil, true, 15*1000) - if err != nil { - return nil, err - } - - brokers = make([]int32, len(md.Brokers)) - for i, mdBroker := range md.Brokers { - brokers[i] = mdBroker.ID - } - - return brokers, nil -} - -// waitTopicInMetadata waits for the given topic to show up in metadata -func waitTopicInMetadata(H Handle, topic string, timeoutMs int) error { - d, _ := time.ParseDuration(fmt.Sprintf("%dms", timeoutMs)) - tEnd := time.Now().Add(d) - - for { - remain := tEnd.Sub(time.Now()).Seconds() - if remain < 0.0 { - return newErrorFromString(ErrTimedOut, - fmt.Sprintf("Timed out waiting for topic %s to appear in metadata", topic)) - } - - md, err := getMetadata(H, nil, true, int(remain*1000)) - if err != nil { - return err - } - - for _, t := range md.Topics { - if t.Topic != topic { - continue - } - if t.Error.Code() != ErrNoError || len(t.Partitions) < 1 { - continue - } - // Proper topic found in metadata - return nil - } - - time.Sleep(500 * 1000) // 500ms - } - -} - -func createAdminClient(t *testing.T) (a *AdminClient) { - numver, strver := LibraryVersion() - if numver < 0x000b0500 { - t.Skipf("Requires librdkafka >=0.11.5 (currently on %s, 0x%x)", strver, numver) - } - - if !testconfRead() { - t.Skipf("Missing testconf.json") - } - - conf := ConfigMap{"bootstrap.servers": testconf.Brokers} - conf.updateFromTestconf() - - /* - * Create producer and produce a 
couple of messages with and without - * headers. - */ - a, err := NewAdminClient(&conf) - if err != nil { - t.Fatalf("NewAdminClient: %v", err) - } - - return a -} - -func createTestTopic(t *testing.T, suffix string, numPartitions int, replicationFactor int) string { - rand.Seed(time.Now().Unix()) - - topic := fmt.Sprintf("%s-%s-%d", testconf.Topic, suffix, rand.Intn(100000)) - - a := createAdminClient(t) - defer a.Close() - - newTopics := []TopicSpecification{ - { - Topic: topic, - NumPartitions: numPartitions, - ReplicationFactor: replicationFactor, - }, - } - - maxDuration, err := time.ParseDuration("30s") - if err != nil { - t.Fatalf("%s", err) - } - - ctx, cancel := context.WithTimeout(context.Background(), maxDuration) - defer cancel() - result, err := a.CreateTopics(ctx, newTopics, nil) - if err != nil { - t.Fatalf("CreateTopics() failed: %s", err) - } - - for _, res := range result { - if res.Error.Code() != ErrNoError { - t.Errorf("Failed to create topic %s: %s\n", - res.Topic, res.Error) - continue - } - - } - - return topic -} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/LICENSE b/vendor/github.com/confluentinc/confluent-kafka-go/v2/LICENSE similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/LICENSE rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/LICENSE diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/.gitignore b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/.gitignore similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/.gitignore rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/.gitignore diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/00version.go similarity index 85% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/00version.go index 2b8f5a7e..1c87d7f4 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/00version.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/00version.go @@ -29,19 +29,19 @@ import ( //defines and strings in sync. // -#define MIN_RD_KAFKA_VERSION 0x01090000 +#define MIN_RD_KAFKA_VERSION 0x02060100 #ifdef __APPLE__ -#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v2.6.1 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" #else -#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" +#define MIN_VER_ERRSTR "confluent-kafka-go requires librdkafka v2.6.1 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" #endif #if RD_KAFKA_VERSION < MIN_RD_KAFKA_VERSION #ifdef __APPLE__ -#error "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#error "confluent-kafka-go requires librdkafka v2.6.1 or later. 
Install the latest version of librdkafka from Homebrew by running `brew install librdkafka` or `brew upgrade librdkafka`" +#else -#error "confluent-kafka-go requires librdkafka v1.9.0 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" +#error "confluent-kafka-go requires librdkafka v2.6.1 or later. Install the latest version of librdkafka from the Confluent repositories, see http://docs.confluent.io/current/installation.html" #endif #endif */ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/README.md similarity index 90% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/README.md index fcbb62bf..26534ed2 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/README.md +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/README.md @@ -1,6 +1,15 @@ # Information for confluent-kafka-go developers -Whenever librdkafka error codes are updated make sure to run generate +## Development process + +1. Use go1.21 (and related tooling) for development on confluent-kafka-go. +2. Make sure to run `gofmt` and `go vet` on your code. +3. While there is no hard-limit, try to keep your line length under 80 + characters. +4. [Test](#testing) your changes and create a PR. + + +NOTE: Whenever librdkafka error codes are updated make sure to run generate before building: ``` @@ -10,7 +19,6 @@ before building: - ## Testing Some of the tests included in this directory, the benchmark and integration tests in particular, diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminapi.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminapi.go new file mode 100644 index 00000000..7490fed9 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminapi.go @@ -0,0 +1,3758 @@ +/** + * Copyright 2018 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package kafka + +import ( + "context" + "fmt" + "strings" + "sync/atomic" + "time" + "unsafe" +) + +/* +#include "select_rdkafka.h" +#include <stdlib.h> + +static const rd_kafka_group_result_t * +group_result_by_idx (const rd_kafka_group_result_t **groups, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return groups[idx]; +} + +static const rd_kafka_topic_result_t * +topic_result_by_idx (const rd_kafka_topic_result_t **topics, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return topics[idx]; +} + +static const rd_kafka_ConfigResource_t * +ConfigResource_by_idx (const rd_kafka_ConfigResource_t **res, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return res[idx]; +} + +static const rd_kafka_ConfigEntry_t * +ConfigEntry_by_idx (const rd_kafka_ConfigEntry_t **entries, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return entries[idx]; +} + +static const rd_kafka_acl_result_t * +acl_result_by_idx (const rd_kafka_acl_result_t **acl_results, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return acl_results[idx]; +} + +static const rd_kafka_DeleteAcls_result_response_t * +DeleteAcls_result_response_by_idx (const rd_kafka_DeleteAcls_result_response_t **delete_acls_result_responses, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return delete_acls_result_responses[idx]; +} + +static const rd_kafka_AclBinding_t * +AclBinding_by_idx (const rd_kafka_AclBinding_t **acl_bindings, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return acl_bindings[idx]; +} + +static const rd_kafka_ConsumerGroupListing_t * +ConsumerGroupListing_by_idx(const rd_kafka_ConsumerGroupListing_t **result_groups, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return result_groups[idx]; +} + +static const rd_kafka_ConsumerGroupDescription_t * +ConsumerGroupDescription_by_idx(const rd_kafka_ConsumerGroupDescription_t **result_groups, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return result_groups[idx]; +} + +static const rd_kafka_TopicDescription_t * +TopicDescription_by_idx(const rd_kafka_TopicDescription_t **result_topics, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return result_topics[idx]; +} + +static const rd_kafka_TopicPartitionInfo_t * +TopicPartitionInfo_by_idx(const rd_kafka_TopicPartitionInfo_t **partitions, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return partitions[idx]; +} + +static const rd_kafka_AclOperation_t AclOperation_by_idx(const rd_kafka_AclOperation_t *acl_operations, size_t cnt, size_t idx) { + if (idx >= cnt) + return RD_KAFKA_ACL_OPERATION_UNKNOWN; + return acl_operations[idx]; +} + +static const rd_kafka_Node_t *Node_by_idx(const rd_kafka_Node_t **nodes, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return nodes[idx]; +} + +static const rd_kafka_UserScramCredentialsDescription_t * +DescribeUserScramCredentials_result_description_by_idx(const rd_kafka_UserScramCredentialsDescription_t **descriptions, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return descriptions[idx]; +} + +static const rd_kafka_AlterUserScramCredentials_result_response_t* +AlterUserScramCredentials_result_response_by_idx(const rd_kafka_AlterUserScramCredentials_result_response_t **responses, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return responses[idx]; +} + +static const rd_kafka_ListOffsetsResultInfo_t * +ListOffsetsResultInfo_by_idx(const rd_kafka_ListOffsetsResultInfo_t **result_infos, size_t cnt, size_t idx) { + if
(idx >= cnt) + return NULL; + return result_infos[idx]; +} + +static const rd_kafka_error_t * +error_by_idx(const rd_kafka_error_t **errors, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return errors[idx]; +} + +static const rd_kafka_topic_partition_result_t * +TopicPartitionResult_by_idx(const rd_kafka_topic_partition_result_t **results, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return results[idx]; +} +*/ +import "C" + +// AdminClient is derived from an existing Producer or Consumer +type AdminClient struct { + handle *handle + isDerived bool // Derived from existing client handle + isClosed uint32 // to check if Admin Client is closed or not. +} + +// IsClosed returns boolean representing if client is closed or not +func (a *AdminClient) IsClosed() bool { + return atomic.LoadUint32(&a.isClosed) == 1 +} + +func (a *AdminClient) verifyClient() error { + if a.IsClosed() { + return getOperationNotAllowedErrorForClosedClient() + } + return nil +} + +func durationToMilliseconds(t time.Duration) int { + if t > 0 { + return (int)(t.Seconds() * 1000.0) + } + return (int)(t) +} + +// TopicResult provides per-topic operation result (error) information. +type TopicResult struct { + // Topic name + Topic string + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error +} + +// String returns a human-readable representation of a TopicResult. +func (t TopicResult) String() string { + if t.Error.code == 0 { + return t.Topic + } + return fmt.Sprintf("%s (%s)", t.Topic, t.Error.str) +} + +// ConsumerGroupResult provides per-group operation result (error) information. +type ConsumerGroupResult struct { + // Group name + Group string + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error +} + +// String returns a human-readable representation of a ConsumerGroupResult. +func (g ConsumerGroupResult) String() string { + if g.Error.code == ErrNoError { + return g.Group + } + return fmt.Sprintf("%s (%s)", g.Group, g.Error.str) +} + +// ConsumerGroupState represents a consumer group state +type ConsumerGroupState int + +const ( + // ConsumerGroupStateUnknown - Unknown ConsumerGroupState + ConsumerGroupStateUnknown ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN + // ConsumerGroupStatePreparingRebalance - preparing rebalance + ConsumerGroupStatePreparingRebalance ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE + // ConsumerGroupStateCompletingRebalance - completing rebalance + ConsumerGroupStateCompletingRebalance ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE + // ConsumerGroupStateStable - stable + ConsumerGroupStateStable ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_STABLE + // ConsumerGroupStateDead - dead group + ConsumerGroupStateDead ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_DEAD + // ConsumerGroupStateEmpty - empty group + ConsumerGroupStateEmpty ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY +) + +// String returns the human-readable representation of a consumer_group_state +func (t ConsumerGroupState) String() string { + return C.GoString(C.rd_kafka_consumer_group_state_name( + C.rd_kafka_consumer_group_state_t(t))) +} + +// ConsumerGroupStateFromString translates a consumer group state name/string to +// a ConsumerGroupState value. 
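Editor's aside: the listing types above (`ConsumerGroupListing`, `ListConsumerGroupsResult`) are what `ListConsumerGroups` returns. A minimal usage sketch, assuming a reachable broker; the address is illustrative, not adapter configuration:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	// "localhost:9092" is an illustrative address.
	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
	if err != nil {
		panic(err)
	}
	defer a.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Valid carries one ConsumerGroupListing per group; Errors carries
	// any per-broker failures, mirroring ListConsumerGroupsResult above.
	res, err := a.ListConsumerGroups(ctx)
	if err != nil {
		panic(err)
	}
	for _, g := range res.Valid {
		fmt.Printf("%s state=%s simple=%v\n", g.GroupID, g.State, g.IsSimpleConsumerGroup)
	}
	for _, e := range res.Errors {
		fmt.Println("listing error:", e)
	}
}
```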
+func ConsumerGroupStateFromString(stateString string) (ConsumerGroupState, error) { + cStr := C.CString(stateString) + defer C.free(unsafe.Pointer(cStr)) + state := ConsumerGroupState(C.rd_kafka_consumer_group_state_code(cStr)) + return state, nil +} + +// ConsumerGroupType represents a consumer group type +type ConsumerGroupType int + +const ( + // ConsumerGroupTypeUnknown - Unknown ConsumerGroupType + ConsumerGroupTypeUnknown ConsumerGroupType = C.RD_KAFKA_CONSUMER_GROUP_TYPE_UNKNOWN + // ConsumerGroupTypeConsumer - Consumer ConsumerGroupType + ConsumerGroupTypeConsumer ConsumerGroupType = C.RD_KAFKA_CONSUMER_GROUP_TYPE_CONSUMER + // ConsumerGroupTypeClassic - Classic ConsumerGroupType + ConsumerGroupTypeClassic ConsumerGroupType = C.RD_KAFKA_CONSUMER_GROUP_TYPE_CLASSIC +) + +// String returns the human-readable representation of a ConsumerGroupType +func (t ConsumerGroupType) String() string { + return C.GoString(C.rd_kafka_consumer_group_type_name( + C.rd_kafka_consumer_group_type_t(t))) +} + +// ConsumerGroupTypeFromString translates a consumer group type name/string to +// a ConsumerGroupType value. +func ConsumerGroupTypeFromString(typeString string) ConsumerGroupType { + cStr := C.CString(typeString) + defer C.free(unsafe.Pointer(cStr)) + groupType := ConsumerGroupType(C.rd_kafka_consumer_group_type_code(cStr)) + return groupType +} + +// ConsumerGroupListing represents the result of ListConsumerGroups for a single +// group. +type ConsumerGroupListing struct { + // Group id. + GroupID string + // Is a simple consumer group. + IsSimpleConsumerGroup bool + // Group state. + State ConsumerGroupState + // Group type. + Type ConsumerGroupType +} + +// ListConsumerGroupsResult represents ListConsumerGroups results and errors. +type ListConsumerGroupsResult struct { + // List of valid ConsumerGroupListings. + Valid []ConsumerGroupListing + // List of errors. + Errors []error +} + +// DeletedRecords contains information about deleted +// records of a single partition +type DeletedRecords struct { + // Low-watermark offset after deletion + LowWatermark Offset +} + +// DeleteRecordsResult represents the result of a DeleteRecords call +// for a single partition. +type DeleteRecordsResult struct { + // One of requested partitions. + // The Error field is set if any occurred for that partition. + TopicPartition TopicPartition + // Deleted records information, or nil if an error occurred. + DeletedRecords *DeletedRecords +} + +// DeleteRecordsResults represents the results of a DeleteRecords call. +type DeleteRecordsResults struct { + // A slice of DeleteRecordsResult, one for each requested topic partition. + DeleteRecordsResults []DeleteRecordsResult +} + +// MemberAssignment represents the assignment of a consumer group member. +type MemberAssignment struct { + // Partitions assigned to current member. + TopicPartitions []TopicPartition +} + +// MemberDescription represents the description of a consumer group member. +type MemberDescription struct { + // Client id. + ClientID string + // Group instance id. + GroupInstanceID string + // Consumer id. + ConsumerID string + // Group member host. + Host string + // Member assignment. + Assignment MemberAssignment +} + +// ConsumerGroupDescription represents the result of DescribeConsumerGroups for +// a single group. +type ConsumerGroupDescription struct { + // Group id. + GroupID string + // Error, if any, of result. Check with `Error.Code() != ErrNoError`. + Error Error + // Is a simple consumer group. 
+ IsSimpleConsumerGroup bool + // Partition assignor identifier. + PartitionAssignor string + // Consumer group state. + State ConsumerGroupState + // Consumer group coordinator (has ID == -1 if not known). + Coordinator Node + // Members list. + Members []MemberDescription + // Operations allowed for the group (nil if not available or not requested) + AuthorizedOperations []ACLOperation +} + +// DescribeConsumerGroupsResult represents the result of a +// DescribeConsumerGroups call. +type DescribeConsumerGroupsResult struct { + // Slice of ConsumerGroupDescription. + ConsumerGroupDescriptions []ConsumerGroupDescription +} + +// TopicCollection represents a collection of topics. +type TopicCollection struct { + // Slice of topic names. + topicNames []string +} + +// NewTopicCollectionOfTopicNames creates a new TopicCollection based on a list +// of topic names. +func NewTopicCollectionOfTopicNames(names []string) TopicCollection { + return TopicCollection{ + topicNames: names, + } +} + +// TopicPartitionInfo represents a specific partition's information inside a +// TopicDescription. +type TopicPartitionInfo struct { + // Partition id. + Partition int + // Leader broker. + Leader *Node + // Replicas of the partition. + Replicas []Node + // In-Sync-Replicas of the partition. + Isr []Node +} + +// TopicDescription represents the result of DescribeTopics for +// a single topic. +type TopicDescription struct { + // Topic name. + Name string + // Topic Id + TopicID UUID + // Error, if any, of the result. Check with `Error.Code() != ErrNoError`. + Error Error + // Is the topic internal to Kafka? + IsInternal bool + // Partitions' information list. + Partitions []TopicPartitionInfo + // Operations allowed for the topic (nil if not available or not requested). + AuthorizedOperations []ACLOperation +} + +// DescribeTopicsResult represents the result of a +// DescribeTopics call. +type DescribeTopicsResult struct { + // Slice of TopicDescription. + TopicDescriptions []TopicDescription +} + +// DescribeClusterResult represents the result of DescribeCluster. +type DescribeClusterResult struct { + // Cluster id for the cluster (always available if broker version >= 0.10.1.0, otherwise nil). + ClusterID *string + // Current controller broker for the cluster (nil if there is none). + Controller *Node + // List of brokers in the cluster. + Nodes []Node + // Operations allowed for the cluster (nil if not available or not requested). + AuthorizedOperations []ACLOperation +} + +// DeleteConsumerGroupsResult represents the result of a DeleteConsumerGroups +// call. +type DeleteConsumerGroupsResult struct { + // Slice of ConsumerGroupResult. + ConsumerGroupResults []ConsumerGroupResult +} + +// ListConsumerGroupOffsetsResult represents the result of a +// ListConsumerGroupOffsets operation. +type ListConsumerGroupOffsetsResult struct { + // A slice of ConsumerGroupTopicPartitions, each element represents a group's + // TopicPartitions and Offsets. + ConsumerGroupsTopicPartitions []ConsumerGroupTopicPartitions +} + +// AlterConsumerGroupOffsetsResult represents the result of a +// AlterConsumerGroupOffsets operation. +type AlterConsumerGroupOffsetsResult struct { + // A slice of ConsumerGroupTopicPartitions, each element represents a group's + // TopicPartitions and Offsets. + ConsumerGroupsTopicPartitions []ConsumerGroupTopicPartitions +} + +// TopicSpecification holds parameters for creating a new topic. +// TopicSpecification is analogous to NewTopic in the Java Topic Admin API. 
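Editor's aside: `CreateTopics` consumes the `TopicSpecification` struct defined just below. A usage sketch in a main package, reusing an admin client built as in the previous sketch; topic name, counts, and config values are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// createMetricsTopic sketches CreateTopics with the TopicSpecification
// defined just below; values are illustrative, not adapter defaults.
func createMetricsTopic(a *kafka.AdminClient) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	results, err := a.CreateTopics(ctx,
		[]kafka.TopicSpecification{{
			Topic:             "metrics",
			NumPartitions:     6,
			ReplicationFactor: 3,
			Config:            map[string]string{"retention.ms": "86400000"},
		}},
		// Give brokers up to 10s to propagate the new topic.
		kafka.SetAdminOperationTimeout(10*time.Second))
	if err != nil {
		return err
	}
	for _, r := range results {
		if r.Error.Code() != kafka.ErrNoError {
			return fmt.Errorf("create %s: %w", r.Topic, r.Error)
		}
	}
	return nil
}
```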
+type TopicSpecification struct { + // Topic name to create. + Topic string + // Number of partitions in topic. + NumPartitions int + // Default replication factor for the topic's partitions, or zero + // if an explicit ReplicaAssignment is set. + ReplicationFactor int + // (Optional) Explicit replica assignment. The outer array is + // indexed by the partition number, while the inner per-partition array + // contains the replica broker ids. The first broker in each + // broker id list will be the preferred replica. + ReplicaAssignment [][]int32 + // Topic configuration. + Config map[string]string +} + +// PartitionsSpecification holds parameters for creating additional partitions for a topic. +// PartitionsSpecification is analogous to NewPartitions in the Java Topic Admin API. +type PartitionsSpecification struct { + // Topic to create more partitions for. + Topic string + // New partition count for topic, must be higher than current partition count. + IncreaseTo int + // (Optional) Explicit replica assignment. The outer array is + // indexed by the new partition index (i.e., 0 for the first added + // partition), while the inner per-partition array + // contains the replica broker ids. The first broker in each + // broker id list will be the preferred replica. + ReplicaAssignment [][]int32 +} + +// ResourceType represents an Apache Kafka resource type +type ResourceType int + +const ( + // ResourceUnknown - Unknown + ResourceUnknown ResourceType = C.RD_KAFKA_RESOURCE_UNKNOWN + // ResourceAny - match any resource type (DescribeConfigs) + ResourceAny ResourceType = C.RD_KAFKA_RESOURCE_ANY + // ResourceTopic - Topic + ResourceTopic ResourceType = C.RD_KAFKA_RESOURCE_TOPIC + // ResourceGroup - Group + ResourceGroup ResourceType = C.RD_KAFKA_RESOURCE_GROUP + // ResourceBroker - Broker + ResourceBroker ResourceType = C.RD_KAFKA_RESOURCE_BROKER +) + +// String returns the human-readable representation of a ResourceType +func (t ResourceType) String() string { + return C.GoString(C.rd_kafka_ResourceType_name(C.rd_kafka_ResourceType_t(t))) +} + +// ResourceTypeFromString translates a resource type name/string to +// a ResourceType value. +func ResourceTypeFromString(typeString string) (ResourceType, error) { + switch strings.ToUpper(typeString) { + case "ANY": + return ResourceAny, nil + case "TOPIC": + return ResourceTopic, nil + case "GROUP": + return ResourceGroup, nil + case "BROKER": + return ResourceBroker, nil + default: + return ResourceUnknown, NewError(ErrInvalidArg, "Unknown resource type", false) + } +} + +// ConfigSource represents an Apache Kafka config source +type ConfigSource int + +const ( + // ConfigSourceUnknown is the default value + ConfigSourceUnknown ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG + // ConfigSourceDynamicTopic is dynamic topic config that is configured for a specific topic + ConfigSourceDynamicTopic ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG + // ConfigSourceDynamicBroker is dynamic broker config that is configured for a specific broker + ConfigSourceDynamicBroker ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG + // ConfigSourceDynamicDefaultBroker is dynamic broker config that is configured as default for all brokers in the cluster + ConfigSourceDynamicDefaultBroker ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG + // ConfigSourceStaticBroker is static broker config provided as broker properties at startup (e.g. 
from server.properties file) + ConfigSourceStaticBroker ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG + // ConfigSourceDefault is built-in default configuration for configs that have a default value + ConfigSourceDefault ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG +) + +// String returns the human-readable representation of a ConfigSource type +func (t ConfigSource) String() string { + return C.GoString(C.rd_kafka_ConfigSource_name(C.rd_kafka_ConfigSource_t(t))) +} + +// ConfigResource holds parameters for altering an Apache Kafka configuration resource +type ConfigResource struct { + // Type of resource to set. + Type ResourceType + // Name of resource to set. + Name string + // Config entries to set. + // Configuration updates are atomic, any configuration property not provided + // here will be reverted (by the broker) to its default value. + // Use DescribeConfigs to retrieve the list of current configuration entry values. + Config []ConfigEntry +} + +// String returns a human-readable representation of a ConfigResource +func (c ConfigResource) String() string { + return fmt.Sprintf("Resource(%s, %s)", c.Type, c.Name) +} + +// AlterOperation specifies the operation to perform on the ConfigEntry. +// Currently only AlterOperationSet. +type AlterOperation int + +const ( + // AlterOperationSet sets/overwrites the configuration setting. + AlterOperationSet = iota +) + +// String returns the human-readable representation of an AlterOperation +func (o AlterOperation) String() string { + switch o { + case AlterOperationSet: + return "Set" + default: + return fmt.Sprintf("Unknown%d?", int(o)) + } +} + +// AlterConfigOpType specifies the operation to perform +// on the ConfigEntry for IncrementalAlterConfig +type AlterConfigOpType int + +const ( + // AlterConfigOpTypeSet sets/overwrites the configuration + // setting. + AlterConfigOpTypeSet AlterConfigOpType = C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET + // AlterConfigOpTypeDelete sets the configuration setting + // to default or NULL. + AlterConfigOpTypeDelete AlterConfigOpType = C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE + // AlterConfigOpTypeAppend appends the value to existing + // configuration settings. + AlterConfigOpTypeAppend AlterConfigOpType = C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND + // AlterConfigOpTypeSubtract subtracts the value from + // existing configuration settings. + AlterConfigOpTypeSubtract AlterConfigOpType = C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT +) + +// String returns the human-readable representation of an AlterOperation +func (o AlterConfigOpType) String() string { + switch o { + case AlterConfigOpTypeSet: + return "Set" + case AlterConfigOpTypeDelete: + return "Delete" + case AlterConfigOpTypeAppend: + return "Append" + case AlterConfigOpTypeSubtract: + return "Subtract" + default: + return fmt.Sprintf("Unknown %d", int(o)) + } +} + +// ConfigEntry holds parameters for altering a resource's configuration. +type ConfigEntry struct { + // Name of configuration entry, e.g., topic configuration property name. + Name string + // Value of configuration entry. + Value string + // Deprecated: Operation to perform on the entry. + Operation AlterOperation + // Operation to perform on the entry incrementally. + IncrementalOperation AlterConfigOpType +} + +// StringMapToConfigEntries creates a new map of ConfigEntry objects from the +// provided string map. The AlterOperation is set on each created entry. 
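Editor's aside: a sketch of `AlterConfigs` fed by `StringMapToConfigEntries` (defined just below), illustrating the atomic-overwrite caveat from the `ConfigResource` comment above; topic and value are illustrative, and the admin client is built as in the earlier sketch:

```go
package main

import (
	"context"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// setRetention sketches AlterConfigs fed by StringMapToConfigEntries.
func setRetention(a *kafka.AdminClient) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Per the ConfigResource comment above, AlterConfigs is atomic: any
	// property omitted here is reverted to its default by the broker.
	res := kafka.ConfigResource{
		Type: kafka.ResourceTopic,
		Name: "metrics", // illustrative topic name
		Config: kafka.StringMapToConfigEntries(
			map[string]string{"retention.ms": "86400000"},
			kafka.AlterOperationSet),
	}
	results, err := a.AlterConfigs(ctx, []kafka.ConfigResource{res})
	if err != nil {
		return err
	}
	for _, r := range results {
		if r.Error.Code() != kafka.ErrNoError {
			return r.Error
		}
	}
	return nil
}
```

For partial updates, `StringMapToIncrementalConfigEntries` with the `AlterConfigOpType` values above avoids the revert-to-default behavior.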
+func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry { + var ceList []ConfigEntry + + for k, v := range stringMap { + ceList = append(ceList, ConfigEntry{Name: k, Value: v, Operation: operation}) + } + + return ceList +} + +// StringMapToIncrementalConfigEntries creates a new map of ConfigEntry objects from the +// provided string map an operation map. The AlterConfigOpType is set on each created entry. +func StringMapToIncrementalConfigEntries(stringMap map[string]string, + operationMap map[string]AlterConfigOpType) []ConfigEntry { + var ceList []ConfigEntry + + for k, v := range stringMap { + ceList = append(ceList, ConfigEntry{Name: k, Value: v, IncrementalOperation: operationMap[k]}) + } + + return ceList +} + +// String returns a human-readable representation of a ConfigEntry. +func (c ConfigEntry) String() string { + return fmt.Sprintf("%v %s=\"%s\"", c.Operation, c.Name, c.Value) +} + +// ConfigEntryResult contains the result of a single configuration entry from a +// DescribeConfigs request. +type ConfigEntryResult struct { + // Name of configuration entry, e.g., topic configuration property name. + Name string + // Value of configuration entry. + Value string + // Source indicates the configuration source. + Source ConfigSource + // IsReadOnly indicates whether the configuration entry can be altered. + IsReadOnly bool + // IsDefault indicates whether the value is at its default. + IsDefault bool + // IsSensitive indicates whether the configuration entry contains sensitive information, in which case the value will be unset. + IsSensitive bool + // IsSynonym indicates whether the configuration entry is a synonym for another configuration property. + IsSynonym bool + // Synonyms contains a map of configuration entries that are synonyms to this configuration entry. + Synonyms map[string]ConfigEntryResult +} + +// String returns a human-readable representation of a ConfigEntryResult. +func (c ConfigEntryResult) String() string { + return fmt.Sprintf("%s=\"%s\"", c.Name, c.Value) +} + +// setFromC sets up a ConfigEntryResult from a C ConfigEntry +func configEntryResultFromC(cEntry *C.rd_kafka_ConfigEntry_t) (entry ConfigEntryResult) { + entry.Name = C.GoString(C.rd_kafka_ConfigEntry_name(cEntry)) + cValue := C.rd_kafka_ConfigEntry_value(cEntry) + if cValue != nil { + entry.Value = C.GoString(cValue) + } + entry.Source = ConfigSource(C.rd_kafka_ConfigEntry_source(cEntry)) + entry.IsReadOnly = cint2bool(C.rd_kafka_ConfigEntry_is_read_only(cEntry)) + entry.IsDefault = cint2bool(C.rd_kafka_ConfigEntry_is_default(cEntry)) + entry.IsSensitive = cint2bool(C.rd_kafka_ConfigEntry_is_sensitive(cEntry)) + entry.IsSynonym = cint2bool(C.rd_kafka_ConfigEntry_is_synonym(cEntry)) + + var cSynCnt C.size_t + cSyns := C.rd_kafka_ConfigEntry_synonyms(cEntry, &cSynCnt) + if cSynCnt > 0 { + entry.Synonyms = make(map[string]ConfigEntryResult) + } + + for si := 0; si < int(cSynCnt); si++ { + cSyn := C.ConfigEntry_by_idx(cSyns, cSynCnt, C.size_t(si)) + Syn := configEntryResultFromC(cSyn) + entry.Synonyms[Syn.Name] = Syn + } + + return entry +} + +// ConfigResourceResult provides the result for a resource from a AlterConfigs or +// DescribeConfigs request. +type ConfigResourceResult struct { + // Type of returned result resource. + Type ResourceType + // Name of returned result resource. + Name string + // Error, if any, of returned result resource. + Error Error + // Config entries, if any, of returned result resource. 
+ Config map[string]ConfigEntryResult +} + +// String returns a human-readable representation of a ConfigResourceResult. +func (c ConfigResourceResult) String() string { + if c.Error.Code() != 0 { + return fmt.Sprintf("ResourceResult(%s, %s, \"%v\")", c.Type, c.Name, c.Error) + + } + return fmt.Sprintf("ResourceResult(%s, %s, %d config(s))", c.Type, c.Name, len(c.Config)) +} + +// ResourcePatternType enumerates the different types of Kafka resource patterns. +type ResourcePatternType int + +const ( + // ResourcePatternTypeUnknown is a resource pattern type not known or not set. + ResourcePatternTypeUnknown ResourcePatternType = C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN + // ResourcePatternTypeAny matches any resource, used for lookups. + ResourcePatternTypeAny ResourcePatternType = C.RD_KAFKA_RESOURCE_PATTERN_ANY + // ResourcePatternTypeMatch will perform pattern matching + ResourcePatternTypeMatch ResourcePatternType = C.RD_KAFKA_RESOURCE_PATTERN_MATCH + // ResourcePatternTypeLiteral matches a literal resource name + ResourcePatternTypeLiteral ResourcePatternType = C.RD_KAFKA_RESOURCE_PATTERN_LITERAL + // ResourcePatternTypePrefixed matches a prefixed resource name + ResourcePatternTypePrefixed ResourcePatternType = C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED +) + +// String returns the human-readable representation of a ResourcePatternType +func (t ResourcePatternType) String() string { + return C.GoString(C.rd_kafka_ResourcePatternType_name(C.rd_kafka_ResourcePatternType_t(t))) +} + +// ResourcePatternTypeFromString translates a resource pattern type name to +// a ResourcePatternType value. +func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error) { + switch strings.ToUpper(patternTypeString) { + case "ANY": + return ResourcePatternTypeAny, nil + case "MATCH": + return ResourcePatternTypeMatch, nil + case "LITERAL": + return ResourcePatternTypeLiteral, nil + case "PREFIXED": + return ResourcePatternTypePrefixed, nil + default: + return ResourcePatternTypeUnknown, NewError(ErrInvalidArg, "Unknown resource pattern type", false) + } +} + +// ACLOperation enumerates the different types of ACL operation. 
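Editor's aside: a sketch of `DescribeConfigs` reading back the `ConfigEntryResult` fields documented above; the topic name is illustrative, and the admin client is built as in the earlier sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// dumpTopicConfig sketches DescribeConfigs and the ConfigEntryResult
// fields documented above.
func dumpTopicConfig(a *kafka.AdminClient) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	results, err := a.DescribeConfigs(ctx,
		[]kafka.ConfigResource{{Type: kafka.ResourceTopic, Name: "metrics"}})
	if err != nil {
		return err
	}
	for _, r := range results {
		if r.Error.Code() != kafka.ErrNoError {
			return r.Error
		}
		for name, entry := range r.Config {
			// Source separates dynamic overrides from broker defaults;
			// sensitive values come back unset, per IsSensitive above.
			fmt.Printf("%s=%q source=%s default=%v\n",
				name, entry.Value, entry.Source, entry.IsDefault)
		}
	}
	return nil
}
```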
+type ACLOperation int + +const ( + // ACLOperationUnknown represents an unknown or unset operation + ACLOperationUnknown ACLOperation = C.RD_KAFKA_ACL_OPERATION_UNKNOWN + // ACLOperationAny in a filter, matches any ACLOperation + ACLOperationAny ACLOperation = C.RD_KAFKA_ACL_OPERATION_ANY + // ACLOperationAll represents all the operations + ACLOperationAll ACLOperation = C.RD_KAFKA_ACL_OPERATION_ALL + // ACLOperationRead a read operation + ACLOperationRead ACLOperation = C.RD_KAFKA_ACL_OPERATION_READ + // ACLOperationWrite represents a write operation + ACLOperationWrite ACLOperation = C.RD_KAFKA_ACL_OPERATION_WRITE + // ACLOperationCreate represents a create operation + ACLOperationCreate ACLOperation = C.RD_KAFKA_ACL_OPERATION_CREATE + // ACLOperationDelete represents a delete operation + ACLOperationDelete ACLOperation = C.RD_KAFKA_ACL_OPERATION_DELETE + // ACLOperationAlter represents an alter operation + ACLOperationAlter ACLOperation = C.RD_KAFKA_ACL_OPERATION_ALTER + // ACLOperationDescribe represents a describe operation + ACLOperationDescribe ACLOperation = C.RD_KAFKA_ACL_OPERATION_DESCRIBE + // ACLOperationClusterAction represents a cluster action operation + ACLOperationClusterAction ACLOperation = C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION + // ACLOperationDescribeConfigs represents a describe configs operation + ACLOperationDescribeConfigs ACLOperation = C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS + // ACLOperationAlterConfigs represents an alter configs operation + ACLOperationAlterConfigs ACLOperation = C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS + // ACLOperationIdempotentWrite represents an idempotent write operation + ACLOperationIdempotentWrite ACLOperation = C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE +) + +// String returns the human-readable representation of an ACLOperation +func (o ACLOperation) String() string { + return C.GoString(C.rd_kafka_AclOperation_name(C.rd_kafka_AclOperation_t(o))) +} + +// ACLOperationFromString translates a ACL operation name to +// a ACLOperation value. +func ACLOperationFromString(aclOperationString string) (ACLOperation, error) { + switch strings.ToUpper(aclOperationString) { + case "ANY": + return ACLOperationAny, nil + case "ALL": + return ACLOperationAll, nil + case "READ": + return ACLOperationRead, nil + case "WRITE": + return ACLOperationWrite, nil + case "CREATE": + return ACLOperationCreate, nil + case "DELETE": + return ACLOperationDelete, nil + case "ALTER": + return ACLOperationAlter, nil + case "DESCRIBE": + return ACLOperationDescribe, nil + case "CLUSTER_ACTION": + return ACLOperationClusterAction, nil + case "DESCRIBE_CONFIGS": + return ACLOperationDescribeConfigs, nil + case "ALTER_CONFIGS": + return ACLOperationAlterConfigs, nil + case "IDEMPOTENT_WRITE": + return ACLOperationIdempotentWrite, nil + default: + return ACLOperationUnknown, NewError(ErrInvalidArg, "Unknown ACL operation", false) + } +} + +// ACLPermissionType enumerates the different types of ACL permission types. 
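Editor's aside: a sketch of `CreateACLs`, which takes the `ACLBinding` struct defined just below; principal, host, and topic are illustrative, and the admin client is built as in the earlier sketch:

```go
package main

import (
	"context"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// allowAdapterWrites sketches CreateACLs with the ACLBinding struct
// defined just below.
func allowAdapterWrites(a *kafka.AdminClient) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	results, err := a.CreateACLs(ctx, kafka.ACLBindings{{
		Type:                kafka.ResourceTopic,
		Name:                "metrics", // illustrative topic name
		ResourcePatternType: kafka.ResourcePatternTypeLiteral,
		Principal:           "User:prometheus-kafka-adapter", // illustrative
		Host:                "*",
		Operation:           kafka.ACLOperationWrite,
		PermissionType:      kafka.ACLPermissionTypeAllow,
	}})
	if err != nil {
		return err
	}
	for _, r := range results {
		if r.Error.Code() != kafka.ErrNoError {
			return r.Error
		}
	}
	return nil
}
```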
+type ACLPermissionType int
+
+const (
+	// ACLPermissionTypeUnknown represents an unknown ACLPermissionType
+	ACLPermissionTypeUnknown ACLPermissionType = C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN
+	// ACLPermissionTypeAny in a filter, matches any ACLPermissionType
+	ACLPermissionTypeAny ACLPermissionType = C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY
+	// ACLPermissionTypeDeny disallows access
+	ACLPermissionTypeDeny ACLPermissionType = C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY
+	// ACLPermissionTypeAllow grants access
+	ACLPermissionTypeAllow ACLPermissionType = C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW
+)
+
+// String returns the human-readable representation of an ACLPermissionType
+func (o ACLPermissionType) String() string {
+	return C.GoString(C.rd_kafka_AclPermissionType_name(C.rd_kafka_AclPermissionType_t(o)))
+}
+
+// ACLPermissionTypeFromString translates an ACL permission type name to
+// an ACLPermissionType value.
+func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error) {
+	switch strings.ToUpper(aclPermissionTypeString) {
+	case "ANY":
+		return ACLPermissionTypeAny, nil
+	case "DENY":
+		return ACLPermissionTypeDeny, nil
+	case "ALLOW":
+		return ACLPermissionTypeAllow, nil
+	default:
+		return ACLPermissionTypeUnknown, NewError(ErrInvalidArg, "Unknown ACL permission type", false)
+	}
+}
+
+// ACLBinding specifies the operation and permission type for a specific principal
+// over one or more resources of the same type. Used by `AdminClient.CreateACLs`,
+// returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`.
+type ACLBinding struct {
+	Type ResourceType // The resource type.
+	// The resource name, which depends on the resource type.
+	// For ResourceBroker the resource name is the broker id.
+	Name                string
+	ResourcePatternType ResourcePatternType // The resource pattern, relative to the name.
+	Principal           string              // The principal this ACLBinding refers to.
+	Host                string              // The host that the call is allowed to come from.
+	Operation           ACLOperation        // The operation/s specified by this binding.
+	PermissionType      ACLPermissionType   // The permission type for the specified operation.
+}
+
+// ACLBindingFilter specifies a filter used to return a list of ACL bindings matching some or all of its attributes.
+// Used by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`.
+type ACLBindingFilter = ACLBinding
+
+// ACLBindings is a slice of ACLBinding that also implements
+// the sort interface.
+type ACLBindings []ACLBinding
+
+// ACLBindingFilters is a slice of ACLBindingFilter that also implements
+// the sort interface.
+type ACLBindingFilters []ACLBindingFilter
+
+func (a ACLBindings) Len() int {
+	return len(a)
+}
+
+func (a ACLBindings) Less(i, j int) bool {
+	if a[i].Type != a[j].Type {
+		return a[i].Type < a[j].Type
+	}
+	if a[i].Name != a[j].Name {
+		return a[i].Name < a[j].Name
+	}
+	if a[i].ResourcePatternType != a[j].ResourcePatternType {
+		return a[i].ResourcePatternType < a[j].ResourcePatternType
+	}
+	if a[i].Principal != a[j].Principal {
+		return a[i].Principal < a[j].Principal
+	}
+	if a[i].Host != a[j].Host {
+		return a[i].Host < a[j].Host
+	}
+	if a[i].Operation != a[j].Operation {
+		return a[i].Operation < a[j].Operation
+	}
+	if a[i].PermissionType != a[j].PermissionType {
+		return a[i].PermissionType < a[j].PermissionType
+	}
+	// All fields compare equal: i is not less than j, so a strict
+	// weak ordering requires returning false here.
+	return false
+}
+
+func (a ACLBindings) Swap(i, j int) {
+	a[i], a[j] = a[j], a[i]
+}
+
+// CreateACLResult provides create ACL error information.
+type CreateACLResult struct {
+	// Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+	Error Error
+}
+
+// DescribeACLsResult provides describe ACLs result or error information.
+type DescribeACLsResult struct {
+	// Slice of ACL bindings matching the provided filter
+	ACLBindings ACLBindings
+	// Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+	Error Error
+}
+
+// DeleteACLsResult provides delete ACLs result or error information.
+type DeleteACLsResult = DescribeACLsResult
+
+// ScramMechanism enumerates SASL/SCRAM mechanisms.
+// Used by `AdminClient.AlterUserScramCredentials`
+// and `AdminClient.DescribeUserScramCredentials`.
+type ScramMechanism int
+
+const (
+	// ScramMechanismUnknown - Unknown SASL/SCRAM mechanism
+	ScramMechanismUnknown ScramMechanism = C.RD_KAFKA_SCRAM_MECHANISM_UNKNOWN
+	// ScramMechanismSHA256 - SCRAM-SHA-256 mechanism
+	ScramMechanismSHA256 ScramMechanism = C.RD_KAFKA_SCRAM_MECHANISM_SHA_256
+	// ScramMechanismSHA512 - SCRAM-SHA-512 mechanism
+	ScramMechanismSHA512 ScramMechanism = C.RD_KAFKA_SCRAM_MECHANISM_SHA_512
+)
+
+// String returns the human-readable representation of a ScramMechanism
+func (o ScramMechanism) String() string {
+	switch o {
+	case ScramMechanismSHA256:
+		return "SCRAM-SHA-256"
+	case ScramMechanismSHA512:
+		return "SCRAM-SHA-512"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+// ScramMechanismFromString translates a Scram Mechanism name to
+// a ScramMechanism value.
+func ScramMechanismFromString(mechanism string) (ScramMechanism, error) {
+	switch strings.ToUpper(mechanism) {
+	case "SCRAM-SHA-256":
+		return ScramMechanismSHA256, nil
+	case "SCRAM-SHA-512":
+		return ScramMechanismSHA512, nil
+	default:
+		return ScramMechanismUnknown,
+			NewError(ErrInvalidArg, "Unknown SCRAM mechanism", false)
+	}
+}
+
+// ScramCredentialInfo contains Mechanism and Iterations for a
+// SASL/SCRAM credential associated with a user.
+type ScramCredentialInfo struct {
+	// Iterations - positive number of iterations used when creating the credential
+	Iterations int
+	// Mechanism - SASL/SCRAM mechanism
+	Mechanism ScramMechanism
+}
+
+// UserScramCredentialsDescription represents all SASL/SCRAM credentials
+// associated with a user that can be retrieved, or an error indicating
+// why credentials could not be retrieved.
+type UserScramCredentialsDescription struct {
+	// User - the user name.
+	User string
+	// ScramCredentialInfos - SASL/SCRAM credential representations for the user.
+	ScramCredentialInfos []ScramCredentialInfo
+	// Error - error corresponding to this user description.
+	Error Error
+}
+
+// UserScramCredentialDeletion is a request to delete
+// a SASL/SCRAM credential for a user.
+type UserScramCredentialDeletion struct {
+	// User - user name
+	User string
+	// Mechanism - SASL/SCRAM mechanism.
+	Mechanism ScramMechanism
+}
+
+// UserScramCredentialUpsertion is a request to update/insert
+// a SASL/SCRAM credential for a user.
+type UserScramCredentialUpsertion struct {
+	// User - user name
+	User string
+	// ScramCredentialInfo - the mechanism and iterations.
+	ScramCredentialInfo ScramCredentialInfo
+	// Password - password to HMAC before storage.
+	Password []byte
+	// Salt - salt to use. Will be generated randomly if nil. (optional)
+	Salt []byte
+}
+
+// DescribeUserScramCredentialsResult represents the result of a
+// DescribeUserScramCredentials call.
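+//
+// A minimal, illustrative sketch of inspecting a result `res`:
+//
+//	for user, desc := range res.Descriptions {
+//		if desc.Error.Code() != ErrNoError {
+//			continue // per-user error
+//		}
+//		_ = user
+//		_ = desc.ScramCredentialInfos // mechanism and iterations per credential
+//	}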
+type DescribeUserScramCredentialsResult struct {
+	// Descriptions - Map from user name
+	// to UserScramCredentialsDescription
+	Descriptions map[string]UserScramCredentialsDescription
+}
+
+// AlterUserScramCredentialsResult represents the result of an
+// AlterUserScramCredentials call.
+type AlterUserScramCredentialsResult struct {
+	// Errors - Map from user name
+	// to an Error, with ErrNoError code on success.
+	Errors map[string]Error
+}
+
+// OffsetSpec specifies desired offsets while using ListOffsets.
+type OffsetSpec int64
+
+const (
+	// MaxTimestampOffsetSpec is used to describe the offset with the maximum timestamp,
+	// which may differ from LatestOffsetSpec since timestamps can be set client-side.
+	MaxTimestampOffsetSpec OffsetSpec = C.RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP
+	// EarliestOffsetSpec is used to describe the earliest offset for the TopicPartition.
+	EarliestOffsetSpec OffsetSpec = C.RD_KAFKA_OFFSET_SPEC_EARLIEST
+	// LatestOffsetSpec is used to describe the latest offset for the TopicPartition.
+	LatestOffsetSpec OffsetSpec = C.RD_KAFKA_OFFSET_SPEC_LATEST
+)
+
+// NewOffsetSpecForTimestamp creates an OffsetSpec corresponding to the timestamp.
+func NewOffsetSpecForTimestamp(timestamp int64) OffsetSpec {
+	return OffsetSpec(timestamp)
+}
+
+// ListOffsetsResultInfo describes the result of a ListOffsets request for a TopicPartition.
+type ListOffsetsResultInfo struct {
+	Offset      Offset
+	Timestamp   int64
+	LeaderEpoch *int32
+	Error       Error
+}
+
+// ListOffsetsResult holds the map of TopicPartition to ListOffsetsResultInfo for a request.
+type ListOffsetsResult struct {
+	ResultInfos map[TopicPartition]ListOffsetsResultInfo
+}
+
+// ElectionType represents the type of election to be performed
+type ElectionType int
+
+const (
+	// ElectionTypePreferred - Preferred election type
+	ElectionTypePreferred ElectionType = C.RD_KAFKA_ELECTION_TYPE_PREFERRED
+	// ElectionTypeUnclean - Unclean election type
+	ElectionTypeUnclean ElectionType = C.RD_KAFKA_ELECTION_TYPE_UNCLEAN
+)
+
+// ElectionTypeFromString translates an election type name to
+// an ElectionType value.
+func ElectionTypeFromString(electionTypeString string) (ElectionType, error) {
+	switch strings.ToUpper(electionTypeString) {
+	case "PREFERRED":
+		return ElectionTypePreferred, nil
+	case "UNCLEAN":
+		return ElectionTypeUnclean, nil
+	default:
+		return ElectionTypePreferred, NewError(ErrInvalidArg, "Unknown election type", false)
+	}
+}
+
+// ElectLeadersRequest holds parameters for the type of election to be performed and
+// the topic partitions for which election has to be performed
+type ElectLeadersRequest struct {
+	// Election type to be performed
+	electionType ElectionType
+	// TopicPartitions for which election has to be performed
+	partitions []TopicPartition
+}
+
+// NewElectLeadersRequest creates a new ElectLeadersRequest with the given election type
+// and topic partitions
+func NewElectLeadersRequest(electionType ElectionType, partitions []TopicPartition) ElectLeadersRequest {
+	return ElectLeadersRequest{
+		electionType: electionType,
+		partitions:   partitions,
+	}
+}
+
+// ElectLeadersResult holds the result of the election performed
+type ElectLeadersResult struct {
+	// TopicPartitions for which election has been performed and the per-partition error, if any,
+	// that occurred while running the election for the specific TopicPartition.
+	TopicPartitions []TopicPartition
+}
+
+// waitResult waits for a result event on cQueue or the ctx to be cancelled, whichever happens
+// first.
+// The returned result event is checked for errors; its error is returned if set.
+func (a *AdminClient) waitResult(ctx context.Context, cQueue *C.rd_kafka_queue_t, cEventType C.rd_kafka_event_type_t) (rkev *C.rd_kafka_event_t, err error) {
+	resultChan := make(chan *C.rd_kafka_event_t)
+	closeChan := make(chan bool) // never written to, just closed
+
+	go func() {
+		for {
+			select {
+			case _, ok := <-closeChan:
+				if !ok {
+					// Context cancelled/timed out
+					close(resultChan)
+					return
+				}
+
+			default:
+				// Wait for result event for at most 50ms
+				// to avoid blocking for too long if
+				// context is cancelled.
+				rkev := C.rd_kafka_queue_poll(cQueue, 50)
+				if rkev != nil {
+					resultChan <- rkev
+					close(resultChan)
+					return
+				}
+			}
+		}
+	}()
+
+	select {
+	case rkev = <-resultChan:
+		// Result type check
+		if cEventType != C.rd_kafka_event_type(rkev) {
+			err = newErrorFromString(ErrInvalidType,
+				fmt.Sprintf("Expected %d result event, not %d", (int)(cEventType), (int)(C.rd_kafka_event_type(rkev))))
+			C.rd_kafka_event_destroy(rkev)
+			return nil, err
+		}
+
+		// Generic error handling
+		cErr := C.rd_kafka_event_error(rkev)
+		if cErr != 0 {
+			err = newErrorFromCString(cErr, C.rd_kafka_event_error_string(rkev))
+			C.rd_kafka_event_destroy(rkev)
+			return nil, err
+		}
+		close(closeChan)
+		return rkev, nil
+	case <-ctx.Done():
+		// signal close to go-routine
+		close(closeChan)
+		// wait for close from go-routine to make sure it is done
+		// using cQueue before we return.
+		rkev, ok := <-resultChan
+		if ok {
+			// throw away result since context was cancelled
+			C.rd_kafka_event_destroy(rkev)
+		}
+		return nil, ctx.Err()
+	}
+}
+
+// cToConsumerGroupResults converts a C group_result_t array to a Go ConsumerGroupResult list.
+func (a *AdminClient) cToConsumerGroupResults(
+	cGroupRes **C.rd_kafka_group_result_t, cCnt C.size_t) (result []ConsumerGroupResult, err error) {
+	result = make([]ConsumerGroupResult, int(cCnt))
+
+	for idx := 0; idx < int(cCnt); idx++ {
+		cGroup := C.group_result_by_idx(cGroupRes, cCnt, C.size_t(idx))
+		result[idx].Group = C.GoString(C.rd_kafka_group_result_name(cGroup))
+		result[idx].Error = newErrorFromCError(C.rd_kafka_group_result_error(cGroup))
+	}
+
+	return result, nil
+}
+
+// cToTopicResults converts a C topic_result_t array to a Go TopicResult list.
+func (a *AdminClient) cToTopicResults(cTopicRes **C.rd_kafka_topic_result_t, cCnt C.size_t) (result []TopicResult, err error) {
+	result = make([]TopicResult, int(cCnt))
+
+	for i := 0; i < int(cCnt); i++ {
+		cTopic := C.topic_result_by_idx(cTopicRes, cCnt, C.size_t(i))
+		result[i].Topic = C.GoString(C.rd_kafka_topic_result_name(cTopic))
+		result[i].Error = newErrorFromCString(
+			C.rd_kafka_topic_result_error(cTopic),
+			C.rd_kafka_topic_result_error_string(cTopic))
+	}
+
+	return result, nil
+}
+
+// cToAuthorizedOperations converts a C AclOperation_t array to a Go
+// ACLOperation list.
+func (a *AdminClient) cToAuthorizedOperations(
+	cAuthorizedOperations *C.rd_kafka_AclOperation_t,
+	cAuthorizedOperationCnt C.size_t) []ACLOperation {
+	if cAuthorizedOperations == nil {
+		return nil
+	}
+
+	authorizedOperations := make([]ACLOperation, int(cAuthorizedOperationCnt))
+	for i := 0; i < int(cAuthorizedOperationCnt); i++ {
+		cAuthorizedOperation := C.AclOperation_by_idx(
+			cAuthorizedOperations, cAuthorizedOperationCnt, C.size_t(i))
+		authorizedOperations[i] = ACLOperation(cAuthorizedOperation)
+	}
+
+	return authorizedOperations
+}
+
+// cToUUID converts a C rd_kafka_Uuid_t to a Go UUID.
+func (a *AdminClient) cToUUID(cUUID *C.rd_kafka_Uuid_t) UUID {
+	uuid := UUID{
+		mostSignificantBits:  int64(C.rd_kafka_Uuid_most_significant_bits(cUUID)),
+		leastSignificantBits: int64(C.rd_kafka_Uuid_least_significant_bits(cUUID)),
+		base64str:            C.GoString(C.rd_kafka_Uuid_base64str(cUUID)),
+	}
+	return uuid
+}
+
+// cToNode converts a C Node_t* to a Go Node.
+// If cNode is nil returns a Node with ID: -1.
+func (a *AdminClient) cToNode(cNode *C.rd_kafka_Node_t) Node {
+	if cNode == nil {
+		return Node{
+			ID: -1,
+		}
+	}
+
+	node := Node{
+		ID:   int(C.rd_kafka_Node_id(cNode)),
+		Host: C.GoString(C.rd_kafka_Node_host(cNode)),
+		Port: int(C.rd_kafka_Node_port(cNode)),
+	}
+
+	cRack := C.rd_kafka_Node_rack(cNode)
+	if cRack != nil {
+		rackID := C.GoString(cRack)
+		node.Rack = &rackID
+	}
+
+	return node
+}
+
+// cToNodePtr converts a C Node_t* to a Go *Node.
+func (a *AdminClient) cToNodePtr(cNode *C.rd_kafka_Node_t) *Node {
+	if cNode == nil {
+		return nil
+	}
+
+	node := a.cToNode(cNode)
+	return &node
+}
+
+// cToNodes converts a C Node_t array to a Go Node list.
+func (a *AdminClient) cToNodes(
+	cNodes **C.rd_kafka_Node_t, cNodeCnt C.size_t) []Node {
+	nodes := make([]Node, int(cNodeCnt))
+	for i := 0; i < int(cNodeCnt); i++ {
+		cNode := C.Node_by_idx(cNodes, cNodeCnt, C.size_t(i))
+		nodes[i] = a.cToNode(cNode)
+	}
+	return nodes
+}
+
+// cToConsumerGroupDescriptions converts a C rd_kafka_ConsumerGroupDescription_t
+// array to a Go ConsumerGroupDescription slice.
+func (a *AdminClient) cToConsumerGroupDescriptions(
+	cGroups **C.rd_kafka_ConsumerGroupDescription_t,
+	cGroupCount C.size_t) (result []ConsumerGroupDescription) {
+	result = make([]ConsumerGroupDescription, cGroupCount)
+	for idx := 0; idx < int(cGroupCount); idx++ {
+		cGroup := C.ConsumerGroupDescription_by_idx(
+			cGroups, cGroupCount, C.size_t(idx))
+
+		groupID := C.GoString(
+			C.rd_kafka_ConsumerGroupDescription_group_id(cGroup))
+		err := newErrorFromCError(
+			C.rd_kafka_ConsumerGroupDescription_error(cGroup))
+		isSimple := cint2bool(
+			C.rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(cGroup))
+		partitionAssignor := C.GoString(
+			C.rd_kafka_ConsumerGroupDescription_partition_assignor(cGroup))
+		state := ConsumerGroupState(
+			C.rd_kafka_ConsumerGroupDescription_state(cGroup))
+
+		cNode := C.rd_kafka_ConsumerGroupDescription_coordinator(cGroup)
+		coordinator := a.cToNode(cNode)
+
+		membersCount := int(
+			C.rd_kafka_ConsumerGroupDescription_member_count(cGroup))
+		members := make([]MemberDescription, membersCount)
+
+		for midx := 0; midx < membersCount; midx++ {
+			cMember :=
+				C.rd_kafka_ConsumerGroupDescription_member(cGroup, C.size_t(midx))
+			cMemberAssignment :=
+				C.rd_kafka_MemberDescription_assignment(cMember)
+			cToppars :=
+				C.rd_kafka_MemberAssignment_partitions(cMemberAssignment)
+			memberAssignment := MemberAssignment{}
+			if cToppars != nil {
+				memberAssignment.TopicPartitions = newTopicPartitionsFromCparts(cToppars)
+			}
+			members[midx] = MemberDescription{
+				ClientID: C.GoString(
+					C.rd_kafka_MemberDescription_client_id(cMember)),
+				GroupInstanceID: C.GoString(
+					C.rd_kafka_MemberDescription_group_instance_id(cMember)),
+				ConsumerID: C.GoString(
+					C.rd_kafka_MemberDescription_consumer_id(cMember)),
+				Host: C.GoString(
+					C.rd_kafka_MemberDescription_host(cMember)),
+				Assignment: memberAssignment,
+			}
+		}
+
+		cAuthorizedOperationsCnt := C.size_t(0)
+		cAuthorizedOperations := C.rd_kafka_ConsumerGroupDescription_authorized_operations(
+			cGroup, &cAuthorizedOperationsCnt)
+		authorizedOperations := a.cToAuthorizedOperations(cAuthorizedOperations,
+			cAuthorizedOperationsCnt)
+
+		result[idx] = ConsumerGroupDescription{
+			GroupID:               groupID,
+			Error:                 err,
+			IsSimpleConsumerGroup: isSimple,
+			PartitionAssignor:     partitionAssignor,
+			State:                 state,
+			Coordinator:           coordinator,
+			Members:               members,
+			AuthorizedOperations:  authorizedOperations,
+		}
+	}
+	return result
+}
+
+// cToTopicPartitionInfo converts a C TopicPartitionInfo_t into a Go
+// TopicPartitionInfo.
+func (a *AdminClient) cToTopicPartitionInfo(
+	partitionInfo *C.rd_kafka_TopicPartitionInfo_t) TopicPartitionInfo {
+	cPartitionID := C.rd_kafka_TopicPartitionInfo_partition(partitionInfo)
+	info := TopicPartitionInfo{
+		Partition: int(cPartitionID),
+	}
+
+	cLeader := C.rd_kafka_TopicPartitionInfo_leader(partitionInfo)
+	info.Leader = a.cToNodePtr(cLeader)
+
+	cReplicaCnt := C.size_t(0)
+	cReplicas := C.rd_kafka_TopicPartitionInfo_replicas(
+		partitionInfo, &cReplicaCnt)
+	info.Replicas = a.cToNodes(cReplicas, cReplicaCnt)
+
+	cIsrCnt := C.size_t(0)
+	cIsr := C.rd_kafka_TopicPartitionInfo_isr(partitionInfo, &cIsrCnt)
+	info.Isr = a.cToNodes(cIsr, cIsrCnt)
+
+	return info
+}
+
+// cToTopicDescriptions converts a C TopicDescription_t
+// array to a Go TopicDescription list.
+func (a *AdminClient) cToTopicDescriptions(
+	cTopicDescriptions **C.rd_kafka_TopicDescription_t,
+	cTopicDescriptionCount C.size_t) (result []TopicDescription) {
+	result = make([]TopicDescription, cTopicDescriptionCount)
+	for idx := 0; idx < int(cTopicDescriptionCount); idx++ {
+		cTopic := C.TopicDescription_by_idx(
+			cTopicDescriptions, cTopicDescriptionCount, C.size_t(idx))
+
+		topicName := C.GoString(
+			C.rd_kafka_TopicDescription_name(cTopic))
+		topicID := a.cToUUID(C.rd_kafka_TopicDescription_topic_id(cTopic))
+		err := newErrorFromCError(
+			C.rd_kafka_TopicDescription_error(cTopic))
+
+		if err.Code() != ErrNoError {
+			result[idx] = TopicDescription{
+				Name:  topicName,
+				Error: err,
+			}
+			continue
+		}
+
+		cPartitionInfoCnt := C.size_t(0)
+		cPartitionInfos := C.rd_kafka_TopicDescription_partitions(cTopic, &cPartitionInfoCnt)
+
+		partitions := make([]TopicPartitionInfo, int(cPartitionInfoCnt))
+
+		for pidx := 0; pidx < int(cPartitionInfoCnt); pidx++ {
+			cPartitionInfo := C.TopicPartitionInfo_by_idx(cPartitionInfos, cPartitionInfoCnt, C.size_t(pidx))
+			partitions[pidx] = a.cToTopicPartitionInfo(cPartitionInfo)
+		}
+
+		cAuthorizedOperationsCnt := C.size_t(0)
+		cAuthorizedOperations := C.rd_kafka_TopicDescription_authorized_operations(
+			cTopic, &cAuthorizedOperationsCnt)
+		authorizedOperations := a.cToAuthorizedOperations(cAuthorizedOperations, cAuthorizedOperationsCnt)
+
+		result[idx] = TopicDescription{
+			Name:                 topicName,
+			TopicID:              topicID,
+			Error:                err,
+			Partitions:           partitions,
+			AuthorizedOperations: authorizedOperations,
+		}
+	}
+	return result
+}
+
+// cToDescribeClusterResult converts a C DescribeTopics_result_t to a Go
+// DescribeClusterResult.
+func (a *AdminClient) cToDescribeClusterResult( + cResult *C.rd_kafka_DescribeTopics_result_t) (result DescribeClusterResult) { + var clusterIDPtr *string = nil + cClusterID := C.rd_kafka_DescribeCluster_result_cluster_id(cResult) + if cClusterID != nil { + clusterID := C.GoString(cClusterID) + clusterIDPtr = &clusterID + } + + var controller *Node = nil + cController := C.rd_kafka_DescribeCluster_result_controller(cResult) + controller = a.cToNodePtr(cController) + + cNodeCnt := C.size_t(0) + cNodes := C.rd_kafka_DescribeCluster_result_nodes(cResult, &cNodeCnt) + nodes := a.cToNodes(cNodes, cNodeCnt) + + cAuthorizedOperationsCnt := C.size_t(0) + cAuthorizedOperations := + C.rd_kafka_DescribeCluster_result_authorized_operations( + cResult, &cAuthorizedOperationsCnt) + authorizedOperations := a.cToAuthorizedOperations( + cAuthorizedOperations, cAuthorizedOperationsCnt) + + return DescribeClusterResult{ + ClusterID: clusterIDPtr, + Controller: controller, + Nodes: nodes, + AuthorizedOperations: authorizedOperations, + } +} + +// cToDescribeUserScramCredentialsResult converts a C +// rd_kafka_DescribeUserScramCredentials_result_t to a Go map of users to +// UserScramCredentialsDescription. +func cToDescribeUserScramCredentialsResult( + cRes *C.rd_kafka_DescribeUserScramCredentials_result_t) map[string]UserScramCredentialsDescription { + result := make(map[string]UserScramCredentialsDescription) + var cDescriptionCount C.size_t + cDescriptions := + C.rd_kafka_DescribeUserScramCredentials_result_descriptions(cRes, + &cDescriptionCount) + + for i := 0; i < int(cDescriptionCount); i++ { + cDescription := + C.DescribeUserScramCredentials_result_description_by_idx( + cDescriptions, cDescriptionCount, C.size_t(i)) + user := C.GoString(C.rd_kafka_UserScramCredentialsDescription_user(cDescription)) + userDescription := UserScramCredentialsDescription{User: user} + + // Populate the error if required. 
+		cError := C.rd_kafka_UserScramCredentialsDescription_error(cDescription)
+		if C.rd_kafka_error_code(cError) != C.RD_KAFKA_RESP_ERR_NO_ERROR {
+			userDescription.Error = newError(C.rd_kafka_error_code(cError))
+			result[user] = userDescription
+			continue
+		}
+
+		cCredentialCount := C.rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count(cDescription)
+		scramCredentialInfos := make([]ScramCredentialInfo, int(cCredentialCount))
+		for j := 0; j < int(cCredentialCount); j++ {
+			cScramCredentialInfo :=
+				C.rd_kafka_UserScramCredentialsDescription_scramcredentialinfo(
+					cDescription, C.size_t(j))
+			cMechanism := C.rd_kafka_ScramCredentialInfo_mechanism(cScramCredentialInfo)
+			cIterations := C.rd_kafka_ScramCredentialInfo_iterations(cScramCredentialInfo)
+			scramCredentialInfos[j] = ScramCredentialInfo{
+				Mechanism:  ScramMechanism(cMechanism),
+				Iterations: int(cIterations),
+			}
+		}
+		userDescription.ScramCredentialInfos = scramCredentialInfos
+		result[user] = userDescription
+	}
+	return result
+}
+
+// cToListOffsetsResult converts a C
+// rd_kafka_ListOffsets_result_t to a Go ListOffsetsResult.
+func cToListOffsetsResult(cRes *C.rd_kafka_ListOffsets_result_t) (result ListOffsetsResult) {
+	result = ListOffsetsResult{ResultInfos: make(map[TopicPartition]ListOffsetsResultInfo)}
+	var cPartitionCount C.size_t
+	cResultInfos := C.rd_kafka_ListOffsets_result_infos(cRes, &cPartitionCount)
+	for itr := 0; itr < int(cPartitionCount); itr++ {
+		cResultInfo := C.ListOffsetsResultInfo_by_idx(cResultInfos, cPartitionCount, C.size_t(itr))
+		resultInfo := ListOffsetsResultInfo{}
+		cPartition := C.rd_kafka_ListOffsetsResultInfo_topic_partition(cResultInfo)
+		topic := C.GoString(cPartition.topic)
+		partition := TopicPartition{Topic: &topic, Partition: int32(cPartition.partition)}
+		resultInfo.Offset = Offset(cPartition.offset)
+		resultInfo.Timestamp = int64(C.rd_kafka_ListOffsetsResultInfo_timestamp(cResultInfo))
+		cLeaderEpoch := int32(C.rd_kafka_topic_partition_get_leader_epoch(cPartition))
+		if cLeaderEpoch >= 0 {
+			resultInfo.LeaderEpoch = &cLeaderEpoch
+		}
+		resultInfo.Error = newError(cPartition.err)
+		result.ResultInfos[partition] = resultInfo
+	}
+	return result
+}
+
+// cToConsumerGroupListings converts a C rd_kafka_ConsumerGroupListing_t array
+// to a Go ConsumerGroupListing slice.
+func (a *AdminClient) cToConsumerGroupListings(
+	cGroups **C.rd_kafka_ConsumerGroupListing_t,
+	cGroupCount C.size_t) (result []ConsumerGroupListing) {
+	result = make([]ConsumerGroupListing, cGroupCount)
+
+	for idx := 0; idx < int(cGroupCount); idx++ {
+		cGroup :=
+			C.ConsumerGroupListing_by_idx(cGroups, cGroupCount, C.size_t(idx))
+		state := ConsumerGroupState(
+			C.rd_kafka_ConsumerGroupListing_state(cGroup))
+		groupType := ConsumerGroupType(C.rd_kafka_ConsumerGroupListing_type(cGroup))
+		result[idx] = ConsumerGroupListing{
+			GroupID: C.GoString(
+				C.rd_kafka_ConsumerGroupListing_group_id(cGroup)),
+			IsSimpleConsumerGroup: cint2bool(
+				C.rd_kafka_ConsumerGroupListing_is_simple_consumer_group(cGroup)),
+			State: state,
+			Type:  groupType,
+		}
+	}
+	return result
+}
+
+// cToErrorList converts a C rd_kafka_error_t array to a Go errors slice.
+func (a *AdminClient) cToErrorList( + cErrs **C.rd_kafka_error_t, cErrCount C.size_t) (errs []error) { + errs = make([]error, cErrCount) + + for idx := 0; idx < int(cErrCount); idx++ { + cErr := C.error_by_idx(cErrs, cErrCount, C.size_t(idx)) + errs[idx] = newErrorFromCError(cErr) + } + + return errs +} + +// cConfigResourceToResult converts a C ConfigResource result array to Go ConfigResourceResult +func (a *AdminClient) cConfigResourceToResult(cRes **C.rd_kafka_ConfigResource_t, cCnt C.size_t) (result []ConfigResourceResult, err error) { + result = make([]ConfigResourceResult, int(cCnt)) + + for i := 0; i < int(cCnt); i++ { + cRes := C.ConfigResource_by_idx(cRes, cCnt, C.size_t(i)) + result[i].Type = ResourceType(C.rd_kafka_ConfigResource_type(cRes)) + result[i].Name = C.GoString(C.rd_kafka_ConfigResource_name(cRes)) + result[i].Error = newErrorFromCString( + C.rd_kafka_ConfigResource_error(cRes), + C.rd_kafka_ConfigResource_error_string(cRes)) + var cConfigCnt C.size_t + cConfigs := C.rd_kafka_ConfigResource_configs(cRes, &cConfigCnt) + if cConfigCnt > 0 { + result[i].Config = make(map[string]ConfigEntryResult) + } + for ci := 0; ci < int(cConfigCnt); ci++ { + cEntry := C.ConfigEntry_by_idx(cConfigs, cConfigCnt, C.size_t(ci)) + entry := configEntryResultFromC(cEntry) + result[i].Config[entry.Name] = entry + } + } + + return result, nil +} + +// setupTopicPartitionFromCtopicPartitionResult sets up a Go TopicPartition from a C rd_kafka_topic_partition_t & C.rd_kafka_error_t. +func setupTopicPartitionFromCtopicPartitionResult(partition *TopicPartition, ctopicPartRes *C.rd_kafka_topic_partition_result_t) { + + setupTopicPartitionFromCrktpar(partition, C.rd_kafka_topic_partition_result_partition(ctopicPartRes)) + partition.Error = newErrorFromCError(C.rd_kafka_topic_partition_result_error(ctopicPartRes)) +} + +// Convert a C rd_kafka_topic_partition_result_t array to a Go TopicPartition list. +func newTopicPartitionsFromCTopicPartitionResult(cResponse **C.rd_kafka_topic_partition_result_t, size C.size_t) (partitions []TopicPartition) { + + partCnt := int(size) + + partitions = make([]TopicPartition, partCnt) + + for i := 0; i < partCnt; i++ { + setupTopicPartitionFromCtopicPartitionResult(&partitions[i], C.TopicPartitionResult_by_idx(cResponse, C.size_t(partCnt), C.size_t(i))) + } + + return partitions +} + +// cToDeletedRecordResult converts a C topic partitions list to a Go DeleteRecordsResult slice. +func cToDeletedRecordResult( + cparts *C.rd_kafka_topic_partition_list_t) (results []DeleteRecordsResult) { + partitions := newTopicPartitionsFromCparts(cparts) + partitionsLen := len(partitions) + results = make([]DeleteRecordsResult, partitionsLen) + + for i := 0; i < partitionsLen; i++ { + results[i].TopicPartition = partitions[i] + if results[i].TopicPartition.Error == nil { + results[i].DeletedRecords = &DeletedRecords{ + LowWatermark: results[i].TopicPartition.Offset} + } + } + + return results +} + +// ClusterID returns the cluster ID as reported in broker metadata. +// +// Note on cancellation: Although the underlying C function respects the +// timeout, it currently cannot be manually cancelled. That means manually +// cancelling the context will block until the C function call returns. +// +// Requires broker version >= 0.10.0. 
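+//
+// A minimal, illustrative sketch (assumes an existing AdminClient `a`):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	clusterID, err := a.ClusterID(ctx)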
+func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error) {
+	err = a.verifyClient()
+	if err != nil {
+		return "", err
+	}
+
+	responseChan := make(chan *C.char, 1)
+
+	go func() {
+		responseChan <- C.rd_kafka_clusterid(a.handle.rk, cTimeoutFromContext(ctx))
+	}()
+
+	select {
+	case <-ctx.Done():
+		if cClusterID := <-responseChan; cClusterID != nil {
+			C.rd_kafka_mem_free(a.handle.rk, unsafe.Pointer(cClusterID))
+		}
+		return "", ctx.Err()
+
+	case cClusterID := <-responseChan:
+		if cClusterID == nil { // C timeout
+			<-ctx.Done()
+			return "", ctx.Err()
+		}
+		defer C.rd_kafka_mem_free(a.handle.rk, unsafe.Pointer(cClusterID))
+		return C.GoString(cClusterID), nil
+	}
+}
+
+// ControllerID returns the broker ID of the current controller as reported in
+// broker metadata.
+//
+// Note on cancellation: Although the underlying C function respects the
+// timeout, it currently cannot be manually cancelled. That means manually
+// cancelling the context will block until the C function call returns.
+//
+// Requires broker version >= 0.10.0.
+func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error) {
+	err = a.verifyClient()
+	if err != nil {
+		return -1, err
+	}
+
+	responseChan := make(chan int32, 1)
+
+	go func() {
+		responseChan <- int32(C.rd_kafka_controllerid(a.handle.rk, cTimeoutFromContext(ctx)))
+	}()
+
+	select {
+	case <-ctx.Done():
+		<-responseChan
+		return 0, ctx.Err()
+
+	case controllerID := <-responseChan:
+		if controllerID < 0 { // C timeout
+			<-ctx.Done()
+			return 0, ctx.Err()
+		}
+		return controllerID, nil
+	}
+}
+
+// CreateTopics creates topics in the cluster.
+//
+// The list of TopicSpecification objects defines the per-topic partition count, replicas, etc.
+//
+// Topic creation is non-atomic and may succeed for some topics but fail for others;
+// make sure to check the result for topic-specific errors.
+//
+// Note: TopicSpecification is analogous to NewTopic in the Java Topic Admin API.
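+//
+// A minimal, illustrative sketch (topic name and counts are placeholders):
+//
+//	results, err := a.CreateTopics(ctx, []TopicSpecification{{
+//		Topic:             "example-topic",
+//		NumPartitions:     3,
+//		ReplicationFactor: 1,
+//	}})
+//	if err == nil {
+//		for _, res := range results {
+//			// res.Error carries per-topic success/failure.
+//		}
+//	}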
+func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cTopics := make([]*C.rd_kafka_NewTopic_t, len(topics)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + // Convert Go TopicSpecifications to C TopicSpecifications + for i, topic := range topics { + + var cReplicationFactor C.int + if topic.ReplicationFactor == 0 { + cReplicationFactor = -1 + } else { + cReplicationFactor = C.int(topic.ReplicationFactor) + } + if topic.ReplicaAssignment != nil { + if cReplicationFactor != -1 { + return nil, newErrorFromString(ErrInvalidArg, + "TopicSpecification.ReplicationFactor and TopicSpecification.ReplicaAssignment are mutually exclusive") + } + + if len(topic.ReplicaAssignment) != topic.NumPartitions { + return nil, newErrorFromString(ErrInvalidArg, + "TopicSpecification.ReplicaAssignment must contain exactly TopicSpecification.NumPartitions partitions") + } + } + + cTopics[i] = C.rd_kafka_NewTopic_new( + C.CString(topic.Topic), + C.int(topic.NumPartitions), + cReplicationFactor, + cErrstr, cErrstrSize) + if cTopics[i] == nil { + return nil, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Topic %s: %s", topic.Topic, C.GoString(cErrstr))) + } + + defer C.rd_kafka_NewTopic_destroy(cTopics[i]) + + for p, replicas := range topic.ReplicaAssignment { + cReplicas := make([]C.int32_t, len(replicas)) + for ri, replica := range replicas { + cReplicas[ri] = C.int32_t(replica) + } + cErr := C.rd_kafka_NewTopic_set_replica_assignment( + cTopics[i], C.int32_t(p), + (*C.int32_t)(&cReplicas[0]), C.size_t(len(cReplicas)), + cErrstr, cErrstrSize) + if cErr != 0 { + return nil, newCErrorFromString(cErr, + fmt.Sprintf("Failed to set replica assignment for topic %s partition %d: %s", topic.Topic, p, C.GoString(cErrstr))) + } + } + + for key, value := range topic.Config { + cErr := C.rd_kafka_NewTopic_set_config( + cTopics[i], + C.CString(key), C.CString(value)) + if cErr != 0 { + return nil, newCErrorFromString(cErr, + fmt.Sprintf("Failed to set config %s=%s for topic %s", key, value, topic.Topic)) + } + } + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATETOPICS, genericOptions) + if err != nil { + return nil, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_CreateTopics( + a.handle.rk, + (**C.rd_kafka_NewTopic_t)(&cTopics[0]), + C.size_t(len(cTopics)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATETOPICS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_CreateTopics_result(rkev) + + // Convert result from C to Go + var cCnt C.size_t + cTopicRes := C.rd_kafka_CreateTopics_result_topics(cRes, &cCnt) + + return a.cToTopicResults(cTopicRes, cCnt) +} + +// DeleteTopics deletes a batch of topics. +// +// This operation is not transactional and may succeed for a subset of topics while +// failing others. 
+// It may take several seconds after the DeleteTopics result returns success for +// all the brokers to become aware that the topics are gone. During this time, +// topic metadata and configuration may continue to return information about deleted topics. +// +// Requires broker version >= 0.10.1.0 +func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cTopics := make([]*C.rd_kafka_DeleteTopic_t, len(topics)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + // Convert Go DeleteTopics to C DeleteTopics + for i, topic := range topics { + cTopics[i] = C.rd_kafka_DeleteTopic_new(C.CString(topic)) + if cTopics[i] == nil { + return nil, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for topic %s", topic)) + } + + defer C.rd_kafka_DeleteTopic_destroy(cTopics[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DELETETOPICS, genericOptions) + if err != nil { + return nil, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DeleteTopics( + a.handle.rk, + (**C.rd_kafka_DeleteTopic_t)(&cTopics[0]), + C.size_t(len(cTopics)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETETOPICS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DeleteTopics_result(rkev) + + // Convert result from C to Go + var cCnt C.size_t + cTopicRes := C.rd_kafka_DeleteTopics_result_topics(cRes, &cCnt) + + return a.cToTopicResults(cTopicRes, cCnt) +} + +// CreatePartitions creates additional partitions for topics. 
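+//
+// A minimal, illustrative sketch (values are placeholders; IncreaseTo is the
+// desired total partition count, not the number of partitions to add):
+//
+//	results, err := a.CreatePartitions(ctx, []PartitionsSpecification{{
+//		Topic:      "example-topic",
+//		IncreaseTo: 6,
+//	}})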
+func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cParts := make([]*C.rd_kafka_NewPartitions_t, len(partitions)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + // Convert Go PartitionsSpecification to C NewPartitions + for i, part := range partitions { + cParts[i] = C.rd_kafka_NewPartitions_new(C.CString(part.Topic), C.size_t(part.IncreaseTo), cErrstr, cErrstrSize) + if cParts[i] == nil { + return nil, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Topic %s: %s", part.Topic, C.GoString(cErrstr))) + } + + defer C.rd_kafka_NewPartitions_destroy(cParts[i]) + + for pidx, replicas := range part.ReplicaAssignment { + cReplicas := make([]C.int32_t, len(replicas)) + for ri, replica := range replicas { + cReplicas[ri] = C.int32_t(replica) + } + cErr := C.rd_kafka_NewPartitions_set_replica_assignment( + cParts[i], C.int32_t(pidx), + (*C.int32_t)(&cReplicas[0]), C.size_t(len(cReplicas)), + cErrstr, cErrstrSize) + if cErr != 0 { + return nil, newCErrorFromString(cErr, + fmt.Sprintf("Failed to set replica assignment for topic %s new partition index %d: %s", part.Topic, pidx, C.GoString(cErrstr))) + } + } + + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, genericOptions) + if err != nil { + return nil, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_CreatePartitions( + a.handle.rk, + (**C.rd_kafka_NewPartitions_t)(&cParts[0]), + C.size_t(len(cParts)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_CreatePartitions_result(rkev) + + // Convert result from C to Go + var cCnt C.size_t + cTopicRes := C.rd_kafka_CreatePartitions_result_topics(cRes, &cCnt) + + return a.cToTopicResults(cTopicRes, cCnt) +} + +// AlterConfigs alters/updates cluster resource configuration. +// +// Updates are not transactional so they may succeed for a subset +// of the provided resources while others fail. +// The configuration for a particular resource is updated atomically, +// replacing values using the provided ConfigEntrys and reverting +// unspecified ConfigEntrys to their default values. +// +// Requires broker version >=0.11.0.0 +// +// AlterConfigs will replace all existing configuration for +// the provided resources with the new configuration given, +// reverting all other configuration to their default values. +// +// Multiple resources and resource types may be set, but at most one +// resource of type ResourceBroker is allowed per call since these +// resource requests must be sent to the broker specified in the resource. 
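+//
+// A minimal, illustrative sketch (assumes ResourceTopic; names and values are
+// placeholders):
+//
+//	results, err := a.AlterConfigs(ctx, []ConfigResource{{
+//		Type: ResourceTopic,
+//		Name: "example-topic",
+//		Config: []ConfigEntry{{
+//			Name:      "cleanup.policy",
+//			Value:     "compact",
+//			Operation: AlterOperationSet,
+//		}},
+//	}})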
+// Deprecated: AlterConfigs is deprecated in favour of IncrementalAlterConfigs
+func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error) {
+	err = a.verifyClient()
+	if err != nil {
+		return nil, err
+	}
+
+	cRes := make([]*C.rd_kafka_ConfigResource_t, len(resources))
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	// Convert Go ConfigResources to C ConfigResources
+	for i, res := range resources {
+		cRes[i] = C.rd_kafka_ConfigResource_new(
+			C.rd_kafka_ResourceType_t(res.Type), C.CString(res.Name))
+		if cRes[i] == nil {
+			return nil, newErrorFromString(ErrInvalidArg,
+				fmt.Sprintf("Invalid arguments for resource %v", res))
+		}
+
+		defer C.rd_kafka_ConfigResource_destroy(cRes[i])
+
+		for _, entry := range res.Config {
+			var cErr C.rd_kafka_resp_err_t
+			switch entry.Operation {
+			case AlterOperationSet:
+				cErr = C.rd_kafka_ConfigResource_set_config(
+					cRes[i], C.CString(entry.Name), C.CString(entry.Value))
+			default:
+				panic(fmt.Sprintf("Invalid ConfigEntry.Operation: %v", entry.Operation))
+			}
+
+			if cErr != 0 {
+				return nil,
+					newCErrorFromString(cErr,
+						fmt.Sprintf("Failed to add configuration %s: %s",
+							entry, C.GoString(C.rd_kafka_err2str(cErr))))
+			}
+		}
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_ALTERCONFIGS, genericOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Asynchronous call
+	C.rd_kafka_AlterConfigs(
+		a.handle.rk,
+		(**C.rd_kafka_ConfigResource_t)(&cRes[0]),
+		C.size_t(len(cRes)),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout
+	rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_ALTERCONFIGS_RESULT)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cResult := C.rd_kafka_event_AlterConfigs_result(rkev)
+
+	// Convert results from C to Go
+	var cCnt C.size_t
+	cResults := C.rd_kafka_AlterConfigs_result_resources(cResult, &cCnt)
+
+	return a.cConfigResourceToResult(cResults, cCnt)
+}
+
+// IncrementalAlterConfigs alters/updates cluster resource configuration.
+//
+// Updates are not transactional, so they may succeed for some resources
+// while failing for others. The configs for a particular resource are
+// updated atomically, executing the corresponding incremental
+// operations on the provided configurations.
+//
+// Requires broker version >=2.3.0
+//
+// IncrementalAlterConfigs will only change configurations for provided
+// resources with the new configuration given.
+//
+// Multiple resources and resource types may be set, but at most one
+// resource of type ResourceBroker is allowed per call since these
+// resource requests must be sent to the broker specified in the resource.
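+//
+// A minimal, illustrative sketch (assumes the AlterConfigOpTypeSet constant
+// and ResourceTopic; names and values are placeholders):
+//
+//	results, err := a.IncrementalAlterConfigs(ctx, []ConfigResource{{
+//		Type: ResourceTopic,
+//		Name: "example-topic",
+//		Config: []ConfigEntry{{
+//			Name:                 "retention.ms",
+//			Value:                "86400000",
+//			IncrementalOperation: AlterConfigOpTypeSet,
+//		}},
+//	}})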
+func (a *AdminClient) IncrementalAlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + cRes := make([]*C.rd_kafka_ConfigResource_t, len(resources)) + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + // Convert Go ConfigResources to C ConfigResources + for i, res := range resources { + cName := C.CString(res.Name) + defer C.free(unsafe.Pointer(cName)) + cRes[i] = C.rd_kafka_ConfigResource_new( + C.rd_kafka_ResourceType_t(res.Type), cName) + if cRes[i] == nil { + return nil, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for resource %v", res)) + } + + defer C.rd_kafka_ConfigResource_destroy(cRes[i]) + + for _, entry := range res.Config { + cName := C.CString(entry.Name) + defer C.free(unsafe.Pointer(cName)) + cValue := C.CString(entry.Value) + defer C.free(unsafe.Pointer(cValue)) + cError := C.rd_kafka_ConfigResource_add_incremental_config( + cRes[i], cName, + C.rd_kafka_AlterConfigOpType_t(entry.IncrementalOperation), + cValue) + + if cError != nil { + err := newErrorFromCErrorDestroy(cError) + return nil, err + } + } + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS, genericOptions) + if err != nil { + return nil, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_IncrementalAlterConfigs( + a.handle.rk, + (**C.rd_kafka_ConfigResource_t)(&cRes[0]), + C.size_t(len(cRes)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + cResult := C.rd_kafka_event_IncrementalAlterConfigs_result(rkev) + + // Convert results from C to Go + var cCnt C.size_t + cResults := C.rd_kafka_IncrementalAlterConfigs_result_resources(cResult, &cCnt) + + return a.cConfigResourceToResult(cResults, cCnt) +} + +// DescribeConfigs retrieves configuration for cluster resources. +// +// The returned configuration includes default values, use +// ConfigEntryResult.IsDefault or ConfigEntryResult.Source to distinguish +// default values from manually configured settings. +// +// The value of config entries where .IsSensitive is true +// will always be nil to avoid disclosing sensitive +// information, such as security settings. +// +// Configuration entries where .IsReadOnly is true can't be modified +// (with AlterConfigs). +// +// Synonym configuration entries are returned if the broker supports +// it (broker version >= 1.1.0). See .Synonyms. +// +// Requires broker version >=0.11.0.0 +// +// Multiple resources and resource types may be requested, but at most +// one resource of type ResourceBroker is allowed per call +// since these resource requests must be sent to the broker specified +// in the resource. 
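+//
+// A minimal, illustrative sketch (assumes ResourceTopic; the topic name is a
+// placeholder):
+//
+//	results, err := a.DescribeConfigs(ctx,
+//		[]ConfigResource{{Type: ResourceTopic, Name: "example-topic"}})
+//	for _, res := range results {
+//		for name, entry := range res.Config {
+//			_ = name
+//			_ = entry.IsDefault // distinguish defaults from explicit settings
+//		}
+//	}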
+func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error) {
+	err = a.verifyClient()
+	if err != nil {
+		return nil, err
+	}
+
+	cRes := make([]*C.rd_kafka_ConfigResource_t, len(resources))
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	// Convert Go ConfigResources to C ConfigResources
+	for i, res := range resources {
+		cRes[i] = C.rd_kafka_ConfigResource_new(
+			C.rd_kafka_ResourceType_t(res.Type), C.CString(res.Name))
+		if cRes[i] == nil {
+			return nil, newErrorFromString(ErrInvalidArg,
+				fmt.Sprintf("Invalid arguments for resource %v", res))
+		}
+
+		defer C.rd_kafka_ConfigResource_destroy(cRes[i])
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, genericOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Asynchronous call
+	C.rd_kafka_DescribeConfigs(
+		a.handle.rk,
+		(**C.rd_kafka_ConfigResource_t)(&cRes[0]),
+		C.size_t(len(cRes)),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout
+	rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cResult := C.rd_kafka_event_DescribeConfigs_result(rkev)
+
+	// Convert results from C to Go
+	var cCnt C.size_t
+	cResults := C.rd_kafka_DescribeConfigs_result_resources(cResult, &cCnt)
+
+	return a.cConfigResourceToResult(cResults, cCnt)
+}
+
+// GetMetadata queries broker for cluster and topic metadata.
+// If topic is non-nil only information about that topic is returned, else if
+// allTopics is false only information about locally used topics is returned,
+// else information about all topics is returned.
+// GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.
+func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) {
+	err := a.verifyClient()
+	if err != nil {
+		return nil, err
+	}
+	return getMetadata(a, topic, allTopics, timeoutMs)
+}
+
+// String returns a human-readable name for an AdminClient instance
+func (a *AdminClient) String() string {
+	return fmt.Sprintf("admin-%s", a.handle.String())
+}
+
+// gethandle implements the Handle interface
+func (a *AdminClient) gethandle() *handle {
+	return a.handle
+}
+
+// SetOAuthBearerToken sets the data to be transmitted
+// to a broker during SASL/OAUTHBEARER authentication. It will return nil
+// on success, otherwise an error if:
+// 1) the token data is invalid (meaning an expiration time in the past
+// or either a token value or an extension key or value that does not meet
+// the regular expression requirements as per
+// https://tools.ietf.org/html/rfc7628#section-3.1);
+// 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
+// 3) SASL/OAUTHBEARER is supported but is not configured as the client's
+// authentication mechanism.
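+//
+// A minimal, illustrative sketch (assumes the OAuthBearerToken fields shown;
+// the token value and principal are placeholders):
+//
+//	err := a.SetOAuthBearerToken(OAuthBearerToken{
+//		TokenValue: "eyJhbGciOi...", // placeholder token
+//		Expiration: time.Now().Add(time.Hour),
+//		Principal:  "admin",
+//	})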
+func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error { + err := a.verifyClient() + if err != nil { + return err + } + return a.handle.setOAuthBearerToken(oauthBearerToken) +} + +// SetOAuthBearerTokenFailure sets the error message describing why token +// retrieval/setting failed; it also schedules a new token refresh event for 10 +// seconds later so the attempt may be retried. It will return nil on +// success, otherwise an error if: +// 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; +// 2) SASL/OAUTHBEARER is supported but is not configured as the client's +// authentication mechanism. +func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error { + err := a.verifyClient() + if err != nil { + return err + } + return a.handle.setOAuthBearerTokenFailure(errstr) +} + +// aclBindingToC converts a Go ACLBinding struct to a C rd_kafka_AclBinding_t +func (a *AdminClient) aclBindingToC(aclBinding *ACLBinding, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBinding_t, err error) { + var cName, cPrincipal, cHost *C.char + cName, cPrincipal, cHost = nil, nil, nil + if len(aclBinding.Name) > 0 { + cName = C.CString(aclBinding.Name) + defer C.free(unsafe.Pointer(cName)) + } + if len(aclBinding.Principal) > 0 { + cPrincipal = C.CString(aclBinding.Principal) + defer C.free(unsafe.Pointer(cPrincipal)) + } + if len(aclBinding.Host) > 0 { + cHost = C.CString(aclBinding.Host) + defer C.free(unsafe.Pointer(cHost)) + } + + result = C.rd_kafka_AclBinding_new( + C.rd_kafka_ResourceType_t(aclBinding.Type), + cName, + C.rd_kafka_ResourcePatternType_t(aclBinding.ResourcePatternType), + cPrincipal, + cHost, + C.rd_kafka_AclOperation_t(aclBinding.Operation), + C.rd_kafka_AclPermissionType_t(aclBinding.PermissionType), + cErrstr, + cErrstrSize, + ) + if result == nil { + err = newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for ACL binding %v: %v", aclBinding, C.GoString(cErrstr))) + } + return +} + +// aclBindingFilterToC converts a Go ACLBindingFilter struct to a C rd_kafka_AclBindingFilter_t +func (a *AdminClient) aclBindingFilterToC(aclBindingFilter *ACLBindingFilter, cErrstr *C.char, cErrstrSize C.size_t) (result *C.rd_kafka_AclBindingFilter_t, err error) { + var cName, cPrincipal, cHost *C.char + cName, cPrincipal, cHost = nil, nil, nil + if len(aclBindingFilter.Name) > 0 { + cName = C.CString(aclBindingFilter.Name) + defer C.free(unsafe.Pointer(cName)) + } + if len(aclBindingFilter.Principal) > 0 { + cPrincipal = C.CString(aclBindingFilter.Principal) + defer C.free(unsafe.Pointer(cPrincipal)) + } + if len(aclBindingFilter.Host) > 0 { + cHost = C.CString(aclBindingFilter.Host) + defer C.free(unsafe.Pointer(cHost)) + } + + result = C.rd_kafka_AclBindingFilter_new( + C.rd_kafka_ResourceType_t(aclBindingFilter.Type), + cName, + C.rd_kafka_ResourcePatternType_t(aclBindingFilter.ResourcePatternType), + cPrincipal, + cHost, + C.rd_kafka_AclOperation_t(aclBindingFilter.Operation), + C.rd_kafka_AclPermissionType_t(aclBindingFilter.PermissionType), + cErrstr, + cErrstrSize, + ) + if result == nil { + err = newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for ACL binding filter %v: %v", aclBindingFilter, C.GoString(cErrstr))) + } + return +} + +// cToACLBinding converts a C rd_kafka_AclBinding_t to Go ACLBinding +func (a *AdminClient) cToACLBinding(cACLBinding *C.rd_kafka_AclBinding_t) ACLBinding { + return ACLBinding{ + ResourceType(C.rd_kafka_AclBinding_restype(cACLBinding)), + 
C.GoString(C.rd_kafka_AclBinding_name(cACLBinding)), + ResourcePatternType(C.rd_kafka_AclBinding_resource_pattern_type(cACLBinding)), + C.GoString(C.rd_kafka_AclBinding_principal(cACLBinding)), + C.GoString(C.rd_kafka_AclBinding_host(cACLBinding)), + ACLOperation(C.rd_kafka_AclBinding_operation(cACLBinding)), + ACLPermissionType(C.rd_kafka_AclBinding_permission_type(cACLBinding)), + } +} + +// cToACLBindings converts a C rd_kafka_AclBinding_t list to Go ACLBindings +func (a *AdminClient) cToACLBindings(cACLBindings **C.rd_kafka_AclBinding_t, aclCnt C.size_t) (result ACLBindings) { + result = make(ACLBindings, aclCnt) + for i := uint(0); i < uint(aclCnt); i++ { + cACLBinding := C.AclBinding_by_idx(cACLBindings, aclCnt, C.size_t(i)) + if cACLBinding == nil { + panic("AclBinding_by_idx must not return nil") + } + result[i] = a.cToACLBinding(cACLBinding) + } + return +} + +// cToCreateACLResults converts a C acl_result_t array to Go CreateACLResult list. +func (a *AdminClient) cToCreateACLResults(cCreateAclsRes **C.rd_kafka_acl_result_t, aclCnt C.size_t) (result []CreateACLResult, err error) { + result = make([]CreateACLResult, uint(aclCnt)) + + for i := uint(0); i < uint(aclCnt); i++ { + cCreateACLRes := C.acl_result_by_idx(cCreateAclsRes, aclCnt, C.size_t(i)) + if cCreateACLRes != nil { + cCreateACLError := C.rd_kafka_acl_result_error(cCreateACLRes) + result[i].Error = newErrorFromCError(cCreateACLError) + } + } + + return result, nil +} + +// cToDescribeACLsResult converts a C rd_kafka_event_t to a Go DescribeAclsResult struct. +func (a *AdminClient) cToDescribeACLsResult(rkev *C.rd_kafka_event_t) (result *DescribeACLsResult) { + result = &DescribeACLsResult{} + err := C.rd_kafka_event_error(rkev) + errCode := ErrorCode(err) + errStr := C.rd_kafka_event_error_string(rkev) + + var cResultACLsCount C.size_t + cResult := C.rd_kafka_event_DescribeAcls_result(rkev) + cResultACLs := C.rd_kafka_DescribeAcls_result_acls(cResult, &cResultACLsCount) + if errCode != ErrNoError { + result.Error = newErrorFromCString(err, errStr) + } + result.ACLBindings = a.cToACLBindings(cResultACLs, cResultACLsCount) + return +} + +// cToDeleteACLsResults converts a C rd_kafka_DeleteAcls_result_response_t array to Go DeleteAclsResult slice. +func (a *AdminClient) cToDeleteACLsResults(cDeleteACLsResResponse **C.rd_kafka_DeleteAcls_result_response_t, resResponseCnt C.size_t) (result []DeleteACLsResult) { + result = make([]DeleteACLsResult, uint(resResponseCnt)) + + for i := uint(0); i < uint(resResponseCnt); i++ { + cDeleteACLsResResponse := C.DeleteAcls_result_response_by_idx(cDeleteACLsResResponse, resResponseCnt, C.size_t(i)) + if cDeleteACLsResResponse == nil { + panic("DeleteAcls_result_response_by_idx must not return nil") + } + + cDeleteACLsError := C.rd_kafka_DeleteAcls_result_response_error(cDeleteACLsResResponse) + result[i].Error = newErrorFromCError(cDeleteACLsError) + + var cMatchingACLsCount C.size_t + cMatchingACLs := C.rd_kafka_DeleteAcls_result_response_matching_acls( + cDeleteACLsResResponse, &cMatchingACLsCount) + + result[i].ACLBindings = a.cToACLBindings(cMatchingACLs, cMatchingACLsCount) + } + return +} + +// CreateACLs creates one or more ACL bindings. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// - `aclBindings` - A slice of ACL binding specifications to create. 
+// - `options` - Create ACLs options +// +// Returns a slice of CreateACLResult with a ErrNoError ErrorCode when the operation was successful +// plus an error that is not nil for client level errors +func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error) { + err = a.verifyClient() + if err != nil { + return nil, err + } + + if aclBindings == nil { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-nil slice of ACLBinding structs") + } + if len(aclBindings) == 0 { + return nil, newErrorFromString(ErrInvalidArg, + "Expected non-empty slice of ACLBinding structs") + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cACLBindings := make([]*C.rd_kafka_AclBinding_t, len(aclBindings)) + + for i, aclBinding := range aclBindings { + cACLBindings[i], err = a.aclBindingToC(&aclBinding, cErrstr, cErrstrSize) + if err != nil { + return + } + defer C.rd_kafka_AclBinding_destroy(cACLBindings[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_CREATEACLS, genericOptions) + if err != nil { + return nil, err + } + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_CreateAcls( + a.handle.rk, + (**C.rd_kafka_AclBinding_t)(&cACLBindings[0]), + C.size_t(len(cACLBindings)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_CREATEACLS_RESULT) + if err != nil { + return nil, err + } + defer C.rd_kafka_event_destroy(rkev) + + var cResultCnt C.size_t + cResult := C.rd_kafka_event_CreateAcls_result(rkev) + aclResults := C.rd_kafka_CreateAcls_result_acls(cResult, &cResultCnt) + result, err = a.cToCreateACLResults(aclResults, cResultCnt) + return +} + +// DescribeACLs matches ACL bindings by filter. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// - `aclBindingFilter` - A filter with attributes that must match. +// string attributes match exact values or any string if set to empty string. +// Enum attributes match exact values or any value if ending with `Any`. 
+// If `ResourcePatternType` is set to `ResourcePatternTypeMatch`, returns ACL bindings with:
+// - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name
+// - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name
+// - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name
+// - `options` - Describe ACLs options
+//
+// Returns a DescribeACLsResult containing the matching ACLBindings when the
+// operation was successful, plus an error that is not `nil` for client-level errors
+func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error) {
+	err = a.verifyClient()
+	if err != nil {
+		return nil, err
+	}
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	cACLBindingFilter, err := a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize)
+	if err != nil {
+		return
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBEACLS, genericOptions)
+	if err != nil {
+		return nil, err
+	}
+	// Create temporary queue for async operation
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Asynchronous call
+	C.rd_kafka_DescribeAcls(
+		a.handle.rk,
+		cACLBindingFilter,
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout
+	rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBEACLS_RESULT)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+	result = a.cToDescribeACLsResult(rkev)
+	return
+}
+
+// DeleteACLs deletes ACL bindings matching one or more ACL binding filters.
+//
+// Parameters:
+// - `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+// - `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete.
+// string attributes match exact values or any string if set to empty string.
+// Enum attributes match exact values or any value if ending with `Any`.
+// If `ResourcePatternType` is set to `ResourcePatternTypeMatch`, deletes ACL bindings with:
+// - `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name
+// - `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name
+// - `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name
+// - `options` - Delete ACLs options
+//
+// Returns a slice of DeleteACLsResult, one per filter, each containing the
+// matching ACLBindings that were deleted, plus an error that is not `nil`
+// for client-level errors
+func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error) {
+	err = a.verifyClient()
+	if err != nil {
+		return nil, err
+	}
+
+	if aclBindingFilters == nil {
+		return nil, newErrorFromString(ErrInvalidArg,
+			"Expected non-nil slice of ACLBindingFilter structs")
+	}
+	if len(aclBindingFilters) == 0 {
+		return nil, newErrorFromString(ErrInvalidArg,
+			"Expected non-empty slice of ACLBindingFilter structs")
+	}
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	cACLBindingFilters := make([]*C.rd_kafka_AclBindingFilter_t, len(aclBindingFilters))
+
+	for i, aclBindingFilter := range aclBindingFilters {
+		cACLBindingFilters[i], err = a.aclBindingFilterToC(&aclBindingFilter, cErrstr, cErrstrSize)
+		if err != nil {
+			return
+		}
+		defer C.rd_kafka_AclBinding_destroy(cACLBindingFilters[i])
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(a.handle, C.RD_KAFKA_ADMIN_OP_DELETEACLS, genericOptions)
+	if err != nil {
+		return nil, err
+	}
+	// Create temporary queue for async operation
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Asynchronous call
+	C.rd_kafka_DeleteAcls(
+		a.handle.rk,
+		(**C.rd_kafka_AclBindingFilter_t)(&cACLBindingFilters[0]),
+		C.size_t(len(cACLBindingFilters)),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout
+	rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETEACLS_RESULT)
+	if err != nil {
+		return nil, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	var cResultResponsesCount C.size_t
+	cResult := C.rd_kafka_event_DeleteAcls_result(rkev)
+	cResultResponses := C.rd_kafka_DeleteAcls_result_responses(cResult, &cResultResponsesCount)
+	result = a.cToDeleteACLsResults(cResultResponses, cResultResponsesCount)
+	return
+}
+
+// SetSaslCredentials sets the SASL credentials used for this admin client.
+// The new credentials will overwrite the old ones (which were set when creating
+// the admin client or by a previous call to SetSaslCredentials). The new
+// credentials will be used the next time the admin client needs to authenticate
+// to a broker. This method will not disconnect existing broker connections that
+// were established with the old credentials.
+// This method applies only to the SASL PLAIN and SCRAM mechanisms.
+func (a *AdminClient) SetSaslCredentials(username, password string) error {
+	err := a.verifyClient()
+	if err != nil {
+		return err
+	}
+
+	return setSaslCredentials(a.handle.rk, username, password)
+}
+
+// Close an AdminClient instance.
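+// A minimal lifecycle sketch (illustrative only; the broker address is an
+// assumption, not part of this file):
+//
+//	a, err := NewAdminClient(&ConfigMap{"bootstrap.servers": "localhost:9092"})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer a.Close()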
+func (a *AdminClient) Close() { + if !atomic.CompareAndSwapUint32(&a.isClosed, 0, 1) { + return + } + if a.isDerived { + // Derived AdminClient needs no cleanup. + a.handle = &handle{} + return + } + + a.handle.cleanup() + + C.rd_kafka_destroy(a.handle.rk) +} + +// ListConsumerGroups lists the consumer groups available in the cluster. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `options` - ListConsumerGroupsAdminOption options. +// +// Returns a ListConsumerGroupsResult, which contains a slice corresponding to +// each group in the cluster and a slice of errors encountered while listing. +// Additionally, an error that is not nil for client-level errors is returned. +// Both the returned error, and the errors slice should be checked. +func (a *AdminClient) ListConsumerGroups( + ctx context.Context, + options ...ListConsumerGroupsAdminOption) (result ListConsumerGroupsResult, err error) { + + result = ListConsumerGroupsResult{} + err = a.verifyClient() + if err != nil { + return result, err + } + + // Convert Go AdminOptions (if any) to C AdminOptions. + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup(a.handle, + C.RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, genericOptions) + if err != nil { + return result, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_ListConsumerGroups (asynchronous). + C.rd_kafka_ListConsumerGroups( + a.handle.rk, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) + if err != nil { + return result, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_ListConsumerGroups_result(rkev) + + // Convert result and broker errors from C to Go. + var cGroupCount C.size_t + cGroups := C.rd_kafka_ListConsumerGroups_result_valid(cRes, &cGroupCount) + result.Valid = a.cToConsumerGroupListings(cGroups, cGroupCount) + + var cErrsCount C.size_t + cErrs := C.rd_kafka_ListConsumerGroups_result_errors(cRes, &cErrsCount) + if cErrsCount == 0 { + return result, nil + } + + result.Errors = a.cToErrorList(cErrs, cErrsCount) + return result, nil +} + +// DescribeConsumerGroups describes groups from cluster as specified by the +// groups list. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `groups` - Slice of groups to describe. This should not be nil/empty. +// - `options` - DescribeConsumerGroupsAdminOption options. +// +// Returns DescribeConsumerGroupsResult, which contains a slice of +// ConsumerGroupDescriptions corresponding to the input groups, plus an error +// that is not `nil` for client level errors. Individual +// ConsumerGroupDescriptions inside the slice should also be checked for +// errors. +func (a *AdminClient) DescribeConsumerGroups( + ctx context.Context, groups []string, + options ...DescribeConsumerGroupsAdminOption) (result DescribeConsumerGroupsResult, err error) { + + describeResult := DescribeConsumerGroupsResult{} + err = a.verifyClient() + if err != nil { + return result, err + } + + // Convert group names into char** required by the implementation. 
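+	// (Each C.CString below allocates memory on the C heap that Go's garbage
+	// collector does not track; the paired deferred C.free releases it when
+	// this function returns.)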
+ cGroupNameList := make([]*C.char, len(groups)) + cGroupNameCount := C.size_t(len(groups)) + + for idx, group := range groups { + cGroupNameList[idx] = C.CString(group) + defer C.free(unsafe.Pointer(cGroupNameList[idx])) + } + + var cGroupNameListPtr **C.char + if cGroupNameCount > 0 { + cGroupNameListPtr = ((**C.char)(&cGroupNameList[0])) + } + + // Convert Go AdminOptions (if any) to C AdminOptions. + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, genericOptions) + if err != nil { + return describeResult, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_DescribeConsumerGroups (asynchronous). + C.rd_kafka_DescribeConsumerGroups( + a.handle.rk, + cGroupNameListPtr, + cGroupNameCount, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) + if err != nil { + return describeResult, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DescribeConsumerGroups_result(rkev) + + // Convert result from C to Go. + var cGroupCount C.size_t + cGroups := C.rd_kafka_DescribeConsumerGroups_result_groups(cRes, &cGroupCount) + describeResult.ConsumerGroupDescriptions = a.cToConsumerGroupDescriptions(cGroups, cGroupCount) + + return describeResult, nil +} + +// DescribeTopics describes topics from cluster as specified by the +// topics list. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `topics` - Collection of topics to describe. This should not have nil +// topic names. +// - `options` - DescribeTopicsAdminOption options. +// +// Returns DescribeTopicsResult, which contains a slice of +// TopicDescriptions corresponding to the input topics, plus an error +// that is not `nil` for client level errors. Individual +// TopicDescriptions inside the slice should also be checked for +// errors. Individual TopicDescriptions also have a +// slice of allowed ACLOperations. +func (a *AdminClient) DescribeTopics( + ctx context.Context, topics TopicCollection, + options ...DescribeTopicsAdminOption) (result DescribeTopicsResult, err error) { + + describeResult := DescribeTopicsResult{} + err = a.verifyClient() + if err != nil { + return result, err + } + + // Convert topic names into char**. + cTopicNameList := make([]*C.char, len(topics.topicNames)) + cTopicNameCount := C.size_t(len(topics.topicNames)) + + if topics.topicNames == nil { + return describeResult, newErrorFromString(ErrInvalidArg, + "TopicCollection of topic names cannot be nil") + } + + for idx, topic := range topics.topicNames { + cTopicNameList[idx] = C.CString(topic) + defer C.free(unsafe.Pointer(cTopicNameList[idx])) + } + + var cTopicNameListPtr **C.char + if cTopicNameCount > 0 { + cTopicNameListPtr = ((**C.char)(&cTopicNameList[0])) + } + + // Convert char** of topic names into rd_kafka_TopicCollection_t* + cTopicCollection := C.rd_kafka_TopicCollection_of_topic_names( + cTopicNameListPtr, cTopicNameCount) + defer C.rd_kafka_TopicCollection_destroy(cTopicCollection) + + // Convert Go AdminOptions (if any) to C AdminOptions. 
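+	// (The typed DescribeTopicsAdminOption values are widened to the generic
+	// AdminOption interface so adminOptionsSetup can apply every option kind
+	// uniformly.)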
+ genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBETOPICS, genericOptions) + if err != nil { + return describeResult, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_DescribeTopics (asynchronous). + C.rd_kafka_DescribeTopics( + a.handle.rk, + cTopicCollection, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT) + if err != nil { + return describeResult, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DescribeTopics_result(rkev) + + // Convert result from C to Go. + var cTopicDescriptionCount C.size_t + cTopicDescriptions := + C.rd_kafka_DescribeTopics_result_topics(cRes, &cTopicDescriptionCount) + describeResult.TopicDescriptions = + a.cToTopicDescriptions(cTopicDescriptions, cTopicDescriptionCount) + + return describeResult, nil +} + +// DescribeCluster describes the cluster +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `options` - DescribeClusterAdminOption options. +// +// Returns ClusterDescription, which contains current cluster ID and controller +// along with a slice of Nodes. It also has a slice of allowed ACLOperations. +func (a *AdminClient) DescribeCluster( + ctx context.Context, + options ...DescribeClusterAdminOption) (result DescribeClusterResult, err error) { + err = a.verifyClient() + if err != nil { + return result, err + } + clusterDesc := DescribeClusterResult{} + + // Convert Go AdminOptions (if any) to C AdminOptions. + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER, genericOptions) + if err != nil { + return clusterDesc, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_DescribeCluster (asynchronous). + C.rd_kafka_DescribeCluster( + a.handle.rk, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT) + if err != nil { + return clusterDesc, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DescribeCluster_result(rkev) + + // Convert result from C to Go. + clusterDesc = a.cToDescribeClusterResult(cRes) + + return clusterDesc, nil +} + +// DeleteConsumerGroups deletes a batch of consumer groups. +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `groups` - A slice of groupIDs to delete. +// - `options` - DeleteConsumerGroupsAdminOption options. +// +// Returns a DeleteConsumerGroupsResult containing a slice of ConsumerGroupResult, with +// group-level errors, (if any) contained inside; and an error that is not nil +// for client level errors. 
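+//
+// A minimal sketch (the group ID is illustrative):
+//
+//	res, err := a.DeleteConsumerGroups(ctx, []string{"stale-group"})
+//	if err == nil {
+//		for _, g := range res.ConsumerGroupResults {
+//			fmt.Println(g.Group, g.Error)
+//		}
+//	}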
+func (a *AdminClient) DeleteConsumerGroups( + ctx context.Context, + groups []string, options ...DeleteConsumerGroupsAdminOption) (result DeleteConsumerGroupsResult, err error) { + cGroups := make([]*C.rd_kafka_DeleteGroup_t, len(groups)) + deleteResult := DeleteConsumerGroupsResult{} + err = a.verifyClient() + if err != nil { + return deleteResult, err + } + + // Convert Go DeleteGroups to C DeleteGroups + for i, group := range groups { + cGroupID := C.CString(group) + defer C.free(unsafe.Pointer(cGroupID)) + + cGroups[i] = C.rd_kafka_DeleteGroup_new(cGroupID) + if cGroups[i] == nil { + return deleteResult, newErrorFromString(ErrInvalidArg, + fmt.Sprintf("Invalid arguments for group %s", group)) + } + + defer C.rd_kafka_DeleteGroup_destroy(cGroups[i]) + } + + // Convert Go AdminOptions (if any) to C AdminOptions + genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_DELETEGROUPS, genericOptions) + if err != nil { + return deleteResult, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Asynchronous call + C.rd_kafka_DeleteGroups( + a.handle.rk, + (**C.rd_kafka_DeleteGroup_t)(&cGroups[0]), + C.size_t(len(cGroups)), + cOptions, + cQueue) + + // Wait for result, error or context timeout + rkev, err := a.waitResult(ctx, cQueue, C.RD_KAFKA_EVENT_DELETEGROUPS_RESULT) + if err != nil { + return deleteResult, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_DeleteGroups_result(rkev) + + // Convert result from C to Go + var cCnt C.size_t + cGroupRes := C.rd_kafka_DeleteGroups_result_groups(cRes, &cCnt) + + deleteResult.ConsumerGroupResults, err = a.cToConsumerGroupResults(cGroupRes, cCnt) + return deleteResult, err +} + +// ListConsumerGroupOffsets fetches the offsets for topic partition(s) for +// consumer group(s). +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for indefinite. +// - `groupsPartitions` - a slice of ConsumerGroupTopicPartitions, each element of which +// has the id of a consumer group, and a slice of the TopicPartitions we +// need to fetch the offsets for. The slice of TopicPartitions can be nil, to fetch +// all topic partitions for that group. +// Currently, the size of `groupsPartitions` has to be exactly one. +// - `options` - ListConsumerGroupOffsetsAdminOption options. +// +// Returns a ListConsumerGroupOffsetsResult, containing a slice of +// ConsumerGroupTopicPartitions corresponding to the input slice, plus an error that is +// not `nil` for client level errors. Individual TopicPartitions inside each of +// the ConsumerGroupTopicPartitions should also be checked for errors. +func (a *AdminClient) ListConsumerGroupOffsets( + ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions, + options ...ListConsumerGroupOffsetsAdminOption) (lcgor ListConsumerGroupOffsetsResult, err error) { + err = a.verifyClient() + if err != nil { + return lcgor, err + } + + lcgor.ConsumerGroupsTopicPartitions = nil + + // For now, we only support one group at a time given as a single element of + // groupsPartitions. + // Code has been written so that only this if-guard needs to be removed when + // we add support for multiple ConsumerGroupTopicPartitions. 
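+	// (In practice this means callers must currently issue one
+	// ListConsumerGroupOffsets call per consumer group.)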
+	if len(groupsPartitions) != 1 {
+		return lcgor, fmt.Errorf(
+			"expected length of groupsPartitions is 1, got %d", len(groupsPartitions))
+	}
+
+	cGroupsPartitions := make([]*C.rd_kafka_ListConsumerGroupOffsets_t,
+		len(groupsPartitions))
+
+	// Convert Go ConsumerGroupTopicPartitions to C ListConsumerGroupOffsets.
+	for i, groupPartitions := range groupsPartitions {
+		// We need to destroy this list because rd_kafka_ListConsumerGroupOffsets_new
+		// creates a copy of it.
+		var cPartitions *C.rd_kafka_topic_partition_list_t = nil
+
+		if groupPartitions.Partitions != nil {
+			cPartitions = newCPartsFromTopicPartitions(groupPartitions.Partitions)
+			defer C.rd_kafka_topic_partition_list_destroy(cPartitions)
+		}
+
+		cGroupID := C.CString(groupPartitions.Group)
+		defer C.free(unsafe.Pointer(cGroupID))
+
+		cGroupsPartitions[i] =
+			C.rd_kafka_ListConsumerGroupOffsets_new(cGroupID, cPartitions)
+		defer C.rd_kafka_ListConsumerGroupOffsets_destroy(cGroupsPartitions[i])
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions.
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(
+		a.handle, C.RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, genericOptions)
+	if err != nil {
+		return lcgor, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation.
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Call rd_kafka_ListConsumerGroupOffsets (asynchronous).
+	C.rd_kafka_ListConsumerGroupOffsets(
+		a.handle.rk,
+		(**C.rd_kafka_ListConsumerGroupOffsets_t)(&cGroupsPartitions[0]),
+		C.size_t(len(cGroupsPartitions)),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout.
+	rkev, err := a.waitResult(
+		ctx, cQueue, C.RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT)
+	if err != nil {
+		return lcgor, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cRes := C.rd_kafka_event_ListConsumerGroupOffsets_result(rkev)
+
+	// Convert result from C to Go.
+	var cGroupCount C.size_t
+	cGroups := C.rd_kafka_ListConsumerGroupOffsets_result_groups(cRes, &cGroupCount)
+	lcgor.ConsumerGroupsTopicPartitions = a.cToConsumerGroupTopicPartitions(cGroups, cGroupCount)
+
+	return lcgor, nil
+}
+
+// AlterConsumerGroupOffsets alters the offsets for topic partition(s) for
+// consumer group(s).
+//
+// Parameters:
+// - `ctx` - context with the maximum amount of time to block, or nil for
+// indefinite.
+// - `groupsPartitions` - a slice of ConsumerGroupTopicPartitions, each element of
+// which has the id of a consumer group, and a slice of the TopicPartitions
+// we need to alter the offsets for. Currently, the size of
+// `groupsPartitions` has to be exactly one.
+// - `options` - AlterConsumerGroupOffsetsAdminOption options.
+//
+// Returns an AlterConsumerGroupOffsetsResult, containing a slice of
+// ConsumerGroupTopicPartitions corresponding to the input slice, plus an error
+// that is not `nil` for client-level errors. Individual TopicPartitions inside
+// each of the ConsumerGroupTopicPartitions should also be checked for errors.
+// This will succeed at the partition level only if the group is not actively
+// subscribed to the corresponding topic(s).
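+//
+// A minimal sketch (topic, group and offset values are illustrative):
+//
+//	topic := "my-topic"
+//	res, err := a.AlterConsumerGroupOffsets(ctx, []ConsumerGroupTopicPartitions{{
+//		Group:      "my-group",
+//		Partitions: []TopicPartition{{Topic: &topic, Partition: 0, Offset: 42}},
+//	}})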
+func (a *AdminClient) AlterConsumerGroupOffsets(
+	ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions,
+	options ...AlterConsumerGroupOffsetsAdminOption) (acgor AlterConsumerGroupOffsetsResult, err error) {
+	err = a.verifyClient()
+	if err != nil {
+		return acgor, err
+	}
+
+	acgor.ConsumerGroupsTopicPartitions = nil
+
+	// For now, we only support one group at a time given as a single element of groupsPartitions.
+	// Code has been written so that only this if-guard needs to be removed when we add support for
+	// multiple ConsumerGroupTopicPartitions.
+	if len(groupsPartitions) != 1 {
+		return acgor, fmt.Errorf(
+			"expected length of groupsPartitions is 1, got %d",
+			len(groupsPartitions))
+	}
+
+	cGroupsPartitions := make(
+		[]*C.rd_kafka_AlterConsumerGroupOffsets_t, len(groupsPartitions))
+
+	// Convert Go ConsumerGroupTopicPartitions to C AlterConsumerGroupOffsets.
+	for idx, groupPartitions := range groupsPartitions {
+		// We need to destroy this list because rd_kafka_AlterConsumerGroupOffsets_new
+		// creates a copy of it.
+		cPartitions := newCPartsFromTopicPartitions(groupPartitions.Partitions)
+
+		cGroupID := C.CString(groupPartitions.Group)
+		defer C.free(unsafe.Pointer(cGroupID))
+
+		cGroupsPartitions[idx] =
+			C.rd_kafka_AlterConsumerGroupOffsets_new(cGroupID, cPartitions)
+		defer C.rd_kafka_AlterConsumerGroupOffsets_destroy(cGroupsPartitions[idx])
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions.
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(
+		a.handle, C.RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS, genericOptions)
+	if err != nil {
+		return acgor, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation.
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Call rd_kafka_AlterConsumerGroupOffsets (asynchronous).
+	C.rd_kafka_AlterConsumerGroupOffsets(
+		a.handle.rk,
+		(**C.rd_kafka_AlterConsumerGroupOffsets_t)(&cGroupsPartitions[0]),
+		C.size_t(len(cGroupsPartitions)),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout.
+	rkev, err := a.waitResult(
+		ctx, cQueue, C.RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT)
+	if err != nil {
+		return acgor, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cRes := C.rd_kafka_event_AlterConsumerGroupOffsets_result(rkev)
+
+	// Convert result from C to Go.
+	var cGroupCount C.size_t
+	cGroups := C.rd_kafka_AlterConsumerGroupOffsets_result_groups(cRes, &cGroupCount)
+	acgor.ConsumerGroupsTopicPartitions = a.cToConsumerGroupTopicPartitions(cGroups, cGroupCount)
+
+	return acgor, nil
+}
+
+// DescribeUserScramCredentials describes SASL/SCRAM credentials for the
+// specified user names.
+//
+// Parameters:
+// - `ctx` - context with the maximum amount of time to block, or nil for
+// indefinite.
+// - `users` - a slice of strings, each one corresponding to a user name; no
+// duplicates are allowed
+// - `options` - DescribeUserScramCredentialsAdminOption options.
+//
+// Returns a map from user name to user SCRAM credentials description.
+// Each description can have an individual error.
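+//
+// A minimal sketch (the user name is illustrative):
+//
+//	res, err := a.DescribeUserScramCredentials(ctx, []string{"alice"})
+//	if err == nil {
+//		fmt.Println(res.Descriptions["alice"])
+//	}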
+func (a *AdminClient) DescribeUserScramCredentials(
+	ctx context.Context, users []string,
+	options ...DescribeUserScramCredentialsAdminOption) (result DescribeUserScramCredentialsResult, err error) {
+	result = DescribeUserScramCredentialsResult{
+		Descriptions: make(map[string]UserScramCredentialsDescription),
+	}
+	err = a.verifyClient()
+	if err != nil {
+		return result, err
+	}
+
+	// Convert user names into char** required by the implementation.
+	cUserList := make([]*C.char, len(users))
+	cUserCount := C.size_t(len(users))
+
+	for idx, user := range users {
+		cUserList[idx] = C.CString(user)
+		defer C.free(unsafe.Pointer(cUserList[idx]))
+	}
+
+	var cUserListPtr **C.char
+	if cUserCount > 0 {
+		cUserListPtr = ((**C.char)(&cUserList[0]))
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions.
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(
+		a.handle,
+		C.RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS, genericOptions)
+	if err != nil {
+		return result, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation.
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Call rd_kafka_DescribeUserScramCredentials (asynchronous).
+	C.rd_kafka_DescribeUserScramCredentials(
+		a.handle.rk,
+		cUserListPtr,
+		cUserCount,
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout.
+	rkev, err := a.waitResult(
+		ctx, cQueue, C.RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT)
+	if err != nil {
+		return result, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cRes := C.rd_kafka_event_DescribeUserScramCredentials_result(rkev)
+
+	// Convert result from C to Go.
+	result.Descriptions = cToDescribeUserScramCredentialsResult(cRes)
+	return result, nil
+}
+
+// ListOffsets describes offsets for the
+// specified TopicPartitions based on an OffsetSpec.
+//
+// Parameters:
+//
+// - `ctx` - context with the maximum amount of time to block, or nil for
+// indefinite.
+// - `topicPartitionOffsets` - a map from TopicPartition to OffsetSpec; it
+// holds either the OffsetSpec enum value or a timestamp. Must not be nil.
+// - `options` - ListOffsetsAdminOption options.
+//
+// Returns a ListOffsetsResult.
+// Each TopicPartition's ListOffset can have an individual error.
+func (a *AdminClient) ListOffsets(
+	ctx context.Context, topicPartitionOffsets map[TopicPartition]OffsetSpec,
+	options ...ListOffsetsAdminOption) (result ListOffsetsResult, err error) {
+	if topicPartitionOffsets == nil {
+		return result, newErrorFromString(ErrInvalidArg, "expected topicPartitionOffsets parameter.")
+	}
+
+	topicPartitions := C.rd_kafka_topic_partition_list_new(C.int(len(topicPartitionOffsets)))
+	defer C.rd_kafka_topic_partition_list_destroy(topicPartitions)
+
+	for tp, offsetValue := range topicPartitionOffsets {
+		cStr := C.CString(*tp.Topic)
+		defer C.free(unsafe.Pointer(cStr))
+		topicPartition := C.rd_kafka_topic_partition_list_add(topicPartitions, cStr, C.int32_t(tp.Partition))
+		topicPartition.offset = C.int64_t(offsetValue)
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions.
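+	// (For ListOffsets the options honored here are SetAdminRequestTimeout
+	// and SetAdminIsolationLevel; see adminoptions.go.)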
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(
+		a.handle, C.RD_KAFKA_ADMIN_OP_LISTOFFSETS, genericOptions)
+	if err != nil {
+		return result, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation.
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Call rd_kafka_ListOffsets (asynchronous).
+	C.rd_kafka_ListOffsets(
+		a.handle.rk,
+		topicPartitions,
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout.
+	rkev, err := a.waitResult(
+		ctx, cQueue, C.RD_KAFKA_EVENT_LISTOFFSETS_RESULT)
+	if err != nil {
+		return result, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cRes := C.rd_kafka_event_ListOffsets_result(rkev)
+
+	// Convert result from C to Go.
+	result = cToListOffsetsResult(cRes)
+
+	return result, nil
+}
+
+// AlterUserScramCredentials alters SASL/SCRAM credentials.
+// The pair (user, mechanism) must be unique among upsertions and deletions.
+//
+// Parameters:
+// - `ctx` - context with the maximum amount of time to block, or nil for
+// indefinite.
+// - `upsertions` - a slice of user credential upsertions
+// - `deletions` - a slice of user credential deletions
+// - `options` - AlterUserScramCredentialsAdminOption options.
+//
+// Returns a map from user name to the corresponding Error, with error code
+// ErrNoError when the request succeeded.
+func (a *AdminClient) AlterUserScramCredentials(
+	ctx context.Context, upsertions []UserScramCredentialUpsertion, deletions []UserScramCredentialDeletion,
+	options ...AlterUserScramCredentialsAdminOption) (result AlterUserScramCredentialsResult, err error) {
+	result = AlterUserScramCredentialsResult{
+		Errors: make(map[string]Error),
+	}
+	err = a.verifyClient()
+	if err != nil {
+		return result, err
+	}
+
+	// Convert the upsertions and deletions into the C alteration list
+	// required by the implementation.
+	cAlterationList := make([]*C.rd_kafka_UserScramCredentialAlteration_t, len(upsertions)+len(deletions))
+	cAlterationCount := C.size_t(len(upsertions) + len(deletions))
+	idx := 0
+
+	for _, upsertion := range upsertions {
+		user := C.CString(upsertion.User)
+		defer C.free(unsafe.Pointer(user))
+
+		var salt *C.uchar = nil
+		var saltSize C.size_t = 0
+		if upsertion.Salt != nil {
+			salt = (*C.uchar)(&upsertion.Salt[0])
+			saltSize = C.size_t(len(upsertion.Salt))
+		}
+
+		cAlterationList[idx] = C.rd_kafka_UserScramCredentialUpsertion_new(user,
+			C.rd_kafka_ScramMechanism_t(upsertion.ScramCredentialInfo.Mechanism),
+			C.int(upsertion.ScramCredentialInfo.Iterations),
+			(*C.uchar)(&upsertion.Password[0]), C.size_t(len(upsertion.Password)),
+			salt, saltSize)
+		defer C.rd_kafka_UserScramCredentialAlteration_destroy(cAlterationList[idx])
+		idx = idx + 1
+	}
+
+	for _, deletion := range deletions {
+		user := C.CString(deletion.User)
+		defer C.free(unsafe.Pointer(user))
+		cAlterationList[idx] = C.rd_kafka_UserScramCredentialDeletion_new(
+			user, C.rd_kafka_ScramMechanism_t(deletion.Mechanism))
+		defer C.rd_kafka_UserScramCredentialAlteration_destroy(cAlterationList[idx])
+		idx = idx + 1
+	}
+
+	var cAlterationListPtr **C.rd_kafka_UserScramCredentialAlteration_t
+	if cAlterationCount > 0 {
+		cAlterationListPtr = ((**C.rd_kafka_UserScramCredentialAlteration_t)(&cAlterationList[0]))
+	}
+
+	// Convert Go AdminOptions (if any) to C AdminOptions.
+ genericOptions := make([]AdminOption, len(options)) + for i := range options { + genericOptions[i] = options[i] + } + cOptions, err := adminOptionsSetup( + a.handle, C.RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS, genericOptions) + if err != nil { + return result, err + } + defer C.rd_kafka_AdminOptions_destroy(cOptions) + + // Create temporary queue for async operation. + cQueue := C.rd_kafka_queue_new(a.handle.rk) + defer C.rd_kafka_queue_destroy(cQueue) + + // Call rd_kafka_AlterUserScramCredentials (asynchronous). + C.rd_kafka_AlterUserScramCredentials( + a.handle.rk, + cAlterationListPtr, + cAlterationCount, + cOptions, + cQueue) + + // Wait for result, error or context timeout. + rkev, err := a.waitResult( + ctx, cQueue, C.RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT) + if err != nil { + return result, err + } + defer C.rd_kafka_event_destroy(rkev) + + cRes := C.rd_kafka_event_AlterUserScramCredentials_result(rkev) + + // Convert result from C to Go. + var cResponseSize C.size_t + cResponses := C.rd_kafka_AlterUserScramCredentials_result_responses(cRes, &cResponseSize) + for i := 0; i < int(cResponseSize); i++ { + cResponse := C.AlterUserScramCredentials_result_response_by_idx( + cResponses, cResponseSize, C.size_t(i)) + user := C.GoString(C.rd_kafka_AlterUserScramCredentials_result_response_user(cResponse)) + err := newErrorFromCError(C.rd_kafka_AlterUserScramCredentials_result_response_error(cResponse)) + result.Errors[user] = err + } + + return result, nil +} + +// DeleteRecords deletes records (messages) in topic partitions older than the offsets provided. +// +// Parameters: +// - `ctx` - context with the maximum amount of time to block, or nil for +// indefinite. +// - `recordsToDelete` - A slice of TopicPartitions with the offset field set. +// For each partition, delete all messages up to but not including the specified offset. +// The offset could be set to kafka.OffsetEnd to delete all the messages in the partition. +// - `options` - DeleteRecordsAdminOptions options. +// +// Returns a DeleteRecordsResults, which contains a slice of +// DeleteRecordsResult, each representing the result for one topic partition. +// Individual TopicPartitions inside the DeleteRecordsResult should be checked for errors. +// If successful, the DeletedRecords within the DeleteRecordsResult will be non-nil, +// and contain the low-watermark offset (smallest available offset of all live replicas). +func (a *AdminClient) DeleteRecords(ctx context.Context, + recordsToDelete []TopicPartition, + options ...DeleteRecordsAdminOption) (result DeleteRecordsResults, err error) { + err = a.verifyClient() + if err != nil { + return result, err + } + + if len(recordsToDelete) == 0 { + return result, newErrorFromString(ErrInvalidArg, "No records to delete") + } + + // convert recordsToDelete to rd_kafka_DeleteRecords_t** required by implementation + cRecordsToDelete := newCPartsFromTopicPartitions(recordsToDelete) + defer C.rd_kafka_topic_partition_list_destroy(cRecordsToDelete) + + cDelRecords := make([]*C.rd_kafka_DeleteRecords_t, 1) + defer C.rd_kafka_DeleteRecords_destroy_array(&cDelRecords[0], C.size_t(1)) + + cDelRecords[0] = C.rd_kafka_DeleteRecords_new(cRecordsToDelete) + + // Convert Go AdminOptions (if any) to C AdminOptions. 
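+	// (For DeleteRecords the options honored here are SetAdminRequestTimeout
+	// and SetAdminOperationTimeout; see adminoptions.go.)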
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(
+		a.handle, C.RD_KAFKA_ADMIN_OP_DELETERECORDS, genericOptions)
+	if err != nil {
+		return result, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation.
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Call rd_kafka_DeleteRecords (asynchronous).
+	C.rd_kafka_DeleteRecords(
+		a.handle.rk,
+		&cDelRecords[0],
+		C.size_t(1),
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout.
+	rkev, err := a.waitResult(
+		ctx, cQueue, C.RD_KAFKA_EVENT_DELETERECORDS_RESULT)
+	if err != nil {
+		return result, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cRes := C.rd_kafka_event_DeleteRecords_result(rkev)
+	cDeleteRecordsResultList := C.rd_kafka_DeleteRecords_result_offsets(cRes)
+
+	// Convert result from C to Go.
+	result.DeleteRecordsResults =
+		cToDeletedRecordResult(cDeleteRecordsResultList)
+
+	return result, nil
+}
+
+// ElectLeaders performs Preferred or Unclean leader elections for the
+// specified topic partitions, or for all of them.
+//
+// Parameters:
+// - `ctx` - context with the maximum amount of time to block, or nil for
+// indefinite.
+// - `electLeaderRequest` - ElectLeadersRequest containing the election type
+// and the partitions to elect leaders for, or nil to run the election on
+// all the partitions.
+// - `options` - ElectLeadersAdminOption options.
+//
+// Returns an ElectLeadersResult containing the TopicPartitions for which a
+// leader election was performed.
+// If partitions are passed as nil, the broker performs leader elections for
+// all partitions, but the results only contain partitions for which there was
+// an election or which resulted in an error.
+// Individual TopicPartitions inside the ElectLeadersResult should be checked for errors.
+// Additionally, an error that is not nil for client-level errors is returned.
+func (a *AdminClient) ElectLeaders(ctx context.Context, electLeaderRequest ElectLeadersRequest, options ...ElectLeadersAdminOption) (result ElectLeadersResult, err error) {
+
+	err = a.verifyClient()
+	if err != nil {
+		return result, err
+	}
+
+	var cTopicPartitions *C.rd_kafka_topic_partition_list_t
+	if electLeaderRequest.partitions != nil {
+		cTopicPartitions = newCPartsFromTopicPartitions(electLeaderRequest.partitions)
+		defer C.rd_kafka_topic_partition_list_destroy(cTopicPartitions)
+	}
+
+	cElectLeadersRequest := C.rd_kafka_ElectLeaders_new(C.rd_kafka_ElectionType_t(electLeaderRequest.electionType), cTopicPartitions)
+	defer C.rd_kafka_ElectLeaders_destroy(cElectLeadersRequest)
+
+	// Convert Go AdminOptions (if any) to C AdminOptions.
+	genericOptions := make([]AdminOption, len(options))
+	for i := range options {
+		genericOptions[i] = options[i]
+	}
+	cOptions, err := adminOptionsSetup(
+		a.handle, C.RD_KAFKA_ADMIN_OP_ELECTLEADERS, genericOptions)
+	if err != nil {
+		return result, err
+	}
+	defer C.rd_kafka_AdminOptions_destroy(cOptions)
+
+	// Create temporary queue for async operation.
+	cQueue := C.rd_kafka_queue_new(a.handle.rk)
+	defer C.rd_kafka_queue_destroy(cQueue)
+
+	// Call rd_kafka_ElectLeaders (asynchronous).
+	C.rd_kafka_ElectLeaders(
+		a.handle.rk,
+		cElectLeadersRequest,
+		cOptions,
+		cQueue)
+
+	// Wait for result, error or context timeout.
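+	// (waitResult blocks on the temporary queue until the ELECTLEADERS result
+	// event arrives, the context is cancelled, or its deadline expires.)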
+	rkev, err := a.waitResult(
+		ctx, cQueue, C.RD_KAFKA_EVENT_ELECTLEADERS_RESULT)
+	if err != nil {
+		return result, err
+	}
+	defer C.rd_kafka_event_destroy(rkev)
+
+	cRes := C.rd_kafka_event_ElectLeaders_result(rkev)
+	var cResponseSize C.size_t
+
+	cResultPartitions := C.rd_kafka_ElectLeaders_result_partitions(cRes, &cResponseSize)
+	result.TopicPartitions = newTopicPartitionsFromCTopicPartitionResult(cResultPartitions, cResponseSize)
+
+	return result, nil
+}
+
+// NewAdminClient creates a new AdminClient instance with a new underlying client instance
+func NewAdminClient(conf *ConfigMap) (*AdminClient, error) {
+
+	err := versionCheck()
+	if err != nil {
+		return nil, err
+	}
+
+	a := &AdminClient{}
+	a.handle = &handle{}
+
+	// Convert ConfigMap to librdkafka conf_t
+	cConf, err := conf.convert()
+	if err != nil {
+		return nil, err
+	}
+
+	cErrstr := (*C.char)(C.malloc(C.size_t(256)))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	C.rd_kafka_conf_set_events(cConf, C.RD_KAFKA_EVENT_STATS|C.RD_KAFKA_EVENT_ERROR|C.RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH)
+
+	// Create librdkafka producer instance. The Producer is somewhat cheaper than
+	// the consumer, but any instance type can be used for Admin APIs.
+	a.handle.rk = C.rd_kafka_new(C.RD_KAFKA_PRODUCER, cConf, cErrstr, 256)
+	if a.handle.rk == nil {
+		return nil, newErrorFromCString(C.RD_KAFKA_RESP_ERR__INVALID_ARG, cErrstr)
+	}
+
+	a.isDerived = false
+	a.handle.setup()
+
+	a.isClosed = 0
+
+	return a, nil
+}
+
+// NewAdminClientFromProducer derives a new AdminClient from an existing Producer instance.
+// The AdminClient will use the same configuration and connections as the parent instance.
+func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error) {
+	if p.handle.rk == nil {
+		return nil, newErrorFromString(ErrInvalidArg, "Can't derive AdminClient from closed producer")
+	}
+
+	a = &AdminClient{}
+	a.handle = &p.handle
+	a.isDerived = true
+	a.isClosed = 0
+	return a, nil
+}
+
+// NewAdminClientFromConsumer derives a new AdminClient from an existing Consumer instance.
+// The AdminClient will use the same configuration and connections as the parent instance.
+func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error) {
+	if c.handle.rk == nil {
+		return nil, newErrorFromString(ErrInvalidArg, "Can't derive AdminClient from closed consumer")
+	}
+
+	a = &AdminClient{}
+	a.handle = &c.handle
+	a.isDerived = true
+	a.isClosed = 0
+	return a, nil
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminoptions.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminoptions.go
new file mode 100644
index 00000000..da8a82a3
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/adminoptions.go
@@ -0,0 +1,661 @@
+/**
+ * Copyright 2018 Confluent Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka
+
+import (
+	"fmt"
+	"time"
+	"unsafe"
+)
+
+/*
+#include "select_rdkafka.h"
+#include <stdlib.h>
+*/
+import "C"
+
+// AdminOptionOperationTimeout sets the broker's operation timeout, such as the
+// timeout for CreateTopics to complete the creation of topics on the controller
+// before returning a result to the application.
+//
+// CreateTopics, DeleteTopics, CreatePartitions:
+// a value 0 will return immediately after triggering topic
+// creation, while > 0 will wait this long for topic creation to propagate
+// in cluster.
+//
+// Default: 0 (return immediately).
+//
+// Valid for CreateTopics, DeleteTopics, CreatePartitions.
+type AdminOptionOperationTimeout struct {
+	isSet bool
+	val   time.Duration
+}
+
+func (ao AdminOptionOperationTimeout) supportsCreateTopics() {
+}
+func (ao AdminOptionOperationTimeout) supportsDeleteTopics() {
+}
+func (ao AdminOptionOperationTimeout) supportsCreatePartitions() {
+}
+func (ao AdminOptionOperationTimeout) supportsDeleteRecords() {
+}
+func (ao AdminOptionOperationTimeout) supportsElectLeaders() {
+}
+
+func (ao AdminOptionOperationTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error {
+	if !ao.isSet {
+		return nil
+	}
+
+	cErrstrSize := C.size_t(512)
+	cErrstr := (*C.char)(C.malloc(cErrstrSize))
+	defer C.free(unsafe.Pointer(cErrstr))
+
+	cErr := C.rd_kafka_AdminOptions_set_operation_timeout(
+		cOptions, C.int(durationToMilliseconds(ao.val)),
+		cErrstr, cErrstrSize)
+	if cErr != 0 {
+		C.rd_kafka_AdminOptions_destroy(cOptions)
+		return newCErrorFromString(cErr,
+			fmt.Sprintf("Failed to set operation timeout: %s", C.GoString(cErrstr)))
+
+	}
+
+	return nil
+}
+
+// SetAdminOperationTimeout sets the broker's operation timeout, such as the
+// timeout for CreateTopics to complete the creation of topics on the controller
+// before returning a result to the application.
+//
+// CreateTopics, DeleteTopics, CreatePartitions:
+// a value 0 will return immediately after triggering topic
+// creation, while > 0 will wait this long for topic creation to propagate
+// in cluster.
+//
+// Default: 0 (return immediately).
+//
+// Valid for CreateTopics, DeleteTopics, CreatePartitions.
+func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout) {
+	ao.isSet = true
+	ao.val = t
+	return ao
+}
+
+// AdminOptionRequestTimeout sets the overall request timeout, including broker
+// lookup, request transmission, operation time on broker, and response.
+//
+// Default: `socket.timeout.ms`.
+//
+// Valid for all Admin API methods.
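+//
+// A usage sketch (the timeout value is illustrative):
+//
+//	res, err := a.DescribeCluster(ctx, SetAdminRequestTimeout(30*time.Second))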
+type AdminOptionRequestTimeout struct { + isSet bool + val time.Duration +} + +func (ao AdminOptionRequestTimeout) supportsCreateTopics() { +} +func (ao AdminOptionRequestTimeout) supportsDeleteTopics() { +} +func (ao AdminOptionRequestTimeout) supportsCreatePartitions() { +} +func (ao AdminOptionRequestTimeout) supportsAlterConfigs() { +} +func (ao AdminOptionRequestTimeout) supportsDescribeConfigs() { +} + +func (ao AdminOptionRequestTimeout) supportsCreateACLs() { +} + +func (ao AdminOptionRequestTimeout) supportsDescribeACLs() { +} + +func (ao AdminOptionRequestTimeout) supportsDeleteACLs() { +} + +func (ao AdminOptionRequestTimeout) supportsListConsumerGroups() { +} +func (ao AdminOptionRequestTimeout) supportsDescribeConsumerGroups() { +} +func (ao AdminOptionRequestTimeout) supportsDescribeTopics() { +} +func (ao AdminOptionRequestTimeout) supportsDescribeCluster() { +} +func (ao AdminOptionRequestTimeout) supportsDeleteConsumerGroups() { +} +func (ao AdminOptionRequestTimeout) supportsListConsumerGroupOffsets() { +} +func (ao AdminOptionRequestTimeout) supportsAlterConsumerGroupOffsets() { +} +func (ao AdminOptionRequestTimeout) supportsListOffsets() { +} +func (ao AdminOptionRequestTimeout) supportsDescribeUserScramCredentials() { +} +func (ao AdminOptionRequestTimeout) supportsAlterUserScramCredentials() { +} +func (ao AdminOptionRequestTimeout) supportsDeleteRecords() { +} +func (ao AdminOptionRequestTimeout) supportsElectLeaders() { +} +func (ao AdminOptionRequestTimeout) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet { + return nil + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cErr := C.rd_kafka_AdminOptions_set_request_timeout( + cOptions, C.int(durationToMilliseconds(ao.val)), + cErrstr, cErrstrSize) + if cErr != 0 { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newCErrorFromString(cErr, + fmt.Sprintf("%s", C.GoString(cErrstr))) + + } + + return nil +} + +// SetAdminRequestTimeout sets the overall request timeout, including broker +// lookup, request transmission, operation time on broker, and response. +// +// Default: `socket.timeout.ms`. +// +// Valid for all Admin API methods. +func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout) { + ao.isSet = true + ao.val = t + return ao +} + +// IsolationLevel is a type which is used for AdminOptions to set the IsolationLevel. +type IsolationLevel int + +const ( + // IsolationLevelReadUncommitted - read uncommitted isolation level + IsolationLevelReadUncommitted IsolationLevel = C.RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED + // IsolationLevelReadCommitted - read committed isolation level + IsolationLevelReadCommitted IsolationLevel = C.RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED +) + +// AdminOptionIsolationLevel sets the overall request IsolationLevel. +// +// Default: `ReadUncommitted`. +// +// Valid for ListOffsets. 
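+//
+// A usage sketch (assuming a previously built `tpOffsets` map):
+//
+//	res, err := a.ListOffsets(ctx, tpOffsets,
+//		SetAdminIsolationLevel(IsolationLevelReadCommitted))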
+type AdminOptionIsolationLevel struct { + isSet bool + val IsolationLevel +} + +func (ao AdminOptionIsolationLevel) supportsListOffsets() { +} +func (ao AdminOptionIsolationLevel) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet { + return nil + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cError := C.rd_kafka_AdminOptions_set_isolation_level( + cOptions, C.rd_kafka_IsolationLevel_t(ao.val)) + if cError != nil { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newErrorFromCErrorDestroy(cError) + + } + + return nil + +} + +// SetAdminIsolationLevel sets the overall IsolationLevel for a request. +// +// Default: `ReadUncommitted`. +// +// Valid for ListOffsets. +func SetAdminIsolationLevel(isolationLevel IsolationLevel) (ao AdminOptionIsolationLevel) { + ao.isSet = true + ao.val = isolationLevel + return ao +} + +// AdminOptionValidateOnly tells the broker to only validate the request, +// without performing the requested operation (create topics, etc). +// +// Default: false. +// +// Valid for CreateTopics, CreatePartitions, AlterConfigs +type AdminOptionValidateOnly struct { + isSet bool + val bool +} + +func (ao AdminOptionValidateOnly) supportsCreateTopics() { +} +func (ao AdminOptionValidateOnly) supportsCreatePartitions() { +} +func (ao AdminOptionValidateOnly) supportsAlterConfigs() { +} + +func (ao AdminOptionValidateOnly) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet { + return nil + } + + cErrstrSize := C.size_t(512) + cErrstr := (*C.char)(C.malloc(cErrstrSize)) + defer C.free(unsafe.Pointer(cErrstr)) + + cErr := C.rd_kafka_AdminOptions_set_validate_only( + cOptions, bool2cint(ao.val), + cErrstr, cErrstrSize) + if cErr != 0 { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newCErrorFromString(cErr, + fmt.Sprintf("%s", C.GoString(cErrstr))) + + } + + return nil +} + +// SetAdminValidateOnly tells the broker to only validate the request, +// without performing the requested operation (create topics, etc). +// +// Default: false. +// +// Valid for CreateTopics, DeleteTopics, CreatePartitions, AlterConfigs +func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly) { + ao.isSet = true + ao.val = validateOnly + return ao +} + +// AdminOptionRequireStableOffsets decides if the broker should return stable +// offsets (transaction-committed). +// +// Default: false +// +// Valid for ListConsumerGroupOffsets. +type AdminOptionRequireStableOffsets struct { + isSet bool + val bool +} + +func (ao AdminOptionRequireStableOffsets) supportsListConsumerGroupOffsets() { +} + +func (ao AdminOptionRequireStableOffsets) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet { + return nil + } + + cError := C.rd_kafka_AdminOptions_set_require_stable_offsets( + cOptions, bool2cint(ao.val)) + if cError != nil { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// SetAdminRequireStableOffsets decides if the broker should return stable +// offsets (transaction-committed). +// +// Default: false +// +// Valid for ListConsumerGroupOffsets. +func SetAdminRequireStableOffsets(val bool) (ao AdminOptionRequireStableOffsets) { + ao.isSet = true + ao.val = val + return ao +} + +// AdminOptionMatchConsumerGroupStates decides groups in which state(s) should be +// listed. +// +// Default: nil (lists groups in all states). +// +// Valid for ListConsumerGroups. 
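+//
+// A usage sketch, e.g. listing only stable groups (assuming
+// ConsumerGroupStateStable as defined by this package):
+//
+//	res, err := a.ListConsumerGroups(ctx,
+//		SetAdminMatchConsumerGroupStates([]ConsumerGroupState{ConsumerGroupStateStable}))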
+type AdminOptionMatchConsumerGroupStates struct { + isSet bool + val []ConsumerGroupState +} + +func (ao AdminOptionMatchConsumerGroupStates) supportsListConsumerGroups() { +} + +func (ao AdminOptionMatchConsumerGroupStates) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet || ao.val == nil { + return nil + } + + // Convert states from Go slice to C pointer. + cStates := make([]C.rd_kafka_consumer_group_state_t, len(ao.val)) + cStatesCount := C.size_t(len(ao.val)) + + for idx, state := range ao.val { + cStates[idx] = C.rd_kafka_consumer_group_state_t(state) + } + + cStatesPtr := ((*C.rd_kafka_consumer_group_state_t)(&cStates[0])) + cError := C.rd_kafka_AdminOptions_set_match_consumer_group_states( + cOptions, cStatesPtr, cStatesCount) + if cError != nil { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// SetAdminMatchConsumerGroupStates sets the state(s) that must be +// listed. +// +// Default: nil (lists groups in all states). +// +// Valid for ListConsumerGroups. +func SetAdminMatchConsumerGroupStates(val []ConsumerGroupState) (ao AdminOptionMatchConsumerGroupStates) { + ao.isSet = true + ao.val = val + return ao +} + +// AdminOptionMatchConsumerGroupTypes decides the type(s) that must be +// listed. +// +// Default: nil (lists groups of all types). +// +// Valid for ListConsumerGroups. +type AdminOptionMatchConsumerGroupTypes struct { + isSet bool + val []ConsumerGroupType +} + +func (ao AdminOptionMatchConsumerGroupTypes) supportsListConsumerGroups() { +} + +func (ao AdminOptionMatchConsumerGroupTypes) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet || ao.val == nil { + return nil + } + + // Convert types from Go slice to C pointer. + cTypes := make([]C.rd_kafka_consumer_group_type_t, len(ao.val)) + cTypesCount := C.size_t(len(ao.val)) + + for idx, groupType := range ao.val { + cTypes[idx] = C.rd_kafka_consumer_group_type_t(groupType) + } + + cTypesPtr := ((*C.rd_kafka_consumer_group_type_t)(&cTypes[0])) + cError := C.rd_kafka_AdminOptions_set_match_consumer_group_types( + cOptions, cTypesPtr, cTypesCount) + if cError != nil { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// SetAdminMatchConsumerGroupTypes set the type(s) that must be +// listed. +// +// Default: nil (lists groups of all types). +// +// Valid for ListConsumerGroups. +func SetAdminMatchConsumerGroupTypes(val []ConsumerGroupType) (ao AdminOptionMatchConsumerGroupTypes) { + ao.isSet = true + ao.val = val + return ao +} + +// AdminOptionIncludeAuthorizedOperations decides if the broker should return +// authorized operations. +// +// Default: false +// +// Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster. 
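+//
+// A usage sketch:
+//
+//	res, err := a.DescribeCluster(ctx,
+//		SetAdminOptionIncludeAuthorizedOperations(true))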
+type AdminOptionIncludeAuthorizedOperations struct { + isSet bool + val bool +} + +func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeConsumerGroups() { +} +func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeTopics() { +} +func (ao AdminOptionIncludeAuthorizedOperations) supportsDescribeCluster() { +} + +func (ao AdminOptionIncludeAuthorizedOperations) apply(cOptions *C.rd_kafka_AdminOptions_t) error { + if !ao.isSet { + return nil + } + + cError := C.rd_kafka_AdminOptions_set_include_authorized_operations( + cOptions, bool2cint(ao.val)) + if cError != nil { + C.rd_kafka_AdminOptions_destroy(cOptions) + return newErrorFromCErrorDestroy(cError) + } + + return nil +} + +// SetAdminOptionIncludeAuthorizedOperations decides if the broker should return +// authorized operations. +// +// Default: false +// +// Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster. +func SetAdminOptionIncludeAuthorizedOperations(val bool) (ao AdminOptionIncludeAuthorizedOperations) { + ao.isSet = true + ao.val = val + return ao +} + +// CreateTopicsAdminOption - see setters. +// +// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly. +type CreateTopicsAdminOption interface { + supportsCreateTopics() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DeleteTopicsAdminOption - see setters. +// +// See SetAdminRequestTimeout, SetAdminOperationTimeout. +type DeleteTopicsAdminOption interface { + supportsDeleteTopics() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// CreatePartitionsAdminOption - see setters. +// +// See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly. +type CreatePartitionsAdminOption interface { + supportsCreatePartitions() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// AlterConfigsAdminOption - see setters. +// +// See SetAdminRequestTimeout, SetAdminValidateOnly, SetAdminIncremental. +type AlterConfigsAdminOption interface { + supportsAlterConfigs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeConfigsAdminOption - see setters. +// +// See SetAdminRequestTimeout. +type DescribeConfigsAdminOption interface { + supportsDescribeConfigs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// CreateACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type CreateACLsAdminOption interface { + supportsCreateACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type DescribeACLsAdminOption interface { + supportsDescribeACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DeleteACLsAdminOption - see setter. +// +// See SetAdminRequestTimeout +type DeleteACLsAdminOption interface { + supportsDeleteACLs() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// ListConsumerGroupsAdminOption - see setter. +// +// See SetAdminRequestTimeout, SetAdminMatchConsumerGroupStates, SetAdminMatchConsumerGroupTypes. +type ListConsumerGroupsAdminOption interface { + supportsListConsumerGroups() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeConsumerGroupsAdminOption - see setter. +// +// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations. +type DescribeConsumerGroupsAdminOption interface { + supportsDescribeConsumerGroups() + apply(cOptions *C.rd_kafka_AdminOptions_t) error +} + +// DescribeTopicsAdminOption - see setter. +// +// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations. 
+type DescribeTopicsAdminOption interface {
+	supportsDescribeTopics()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// DescribeClusterAdminOption - see setter.
+//
+// See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations.
+type DescribeClusterAdminOption interface {
+	supportsDescribeCluster()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// DeleteConsumerGroupsAdminOption - see setters.
+//
+// See SetAdminRequestTimeout.
+type DeleteConsumerGroupsAdminOption interface {
+	supportsDeleteConsumerGroups()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// ListConsumerGroupOffsetsAdminOption - see setter.
+//
+// See SetAdminRequestTimeout, SetAdminRequireStableOffsets.
+type ListConsumerGroupOffsetsAdminOption interface {
+	supportsListConsumerGroupOffsets()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// AlterConsumerGroupOffsetsAdminOption - see setter.
+//
+// See SetAdminRequestTimeout.
+type AlterConsumerGroupOffsetsAdminOption interface {
+	supportsAlterConsumerGroupOffsets()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// DescribeUserScramCredentialsAdminOption - see setter.
+//
+// See SetAdminRequestTimeout.
+type DescribeUserScramCredentialsAdminOption interface {
+	supportsDescribeUserScramCredentials()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// AlterUserScramCredentialsAdminOption - see setter.
+//
+// See SetAdminRequestTimeout.
+type AlterUserScramCredentialsAdminOption interface {
+	supportsAlterUserScramCredentials()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// ListOffsetsAdminOption - see setter.
+//
+// See SetAdminRequestTimeout, SetAdminIsolationLevel.
+type ListOffsetsAdminOption interface {
+	supportsListOffsets()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// DeleteRecordsAdminOption - see setter.
+//
+// See SetAdminRequestTimeout, SetAdminOperationTimeout.
+type DeleteRecordsAdminOption interface {
+	supportsDeleteRecords()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// ElectLeadersAdminOption - see setter.
+//
+// See SetAdminRequestTimeout, SetAdminOperationTimeout.
+type ElectLeadersAdminOption interface {
+	supportsElectLeaders()
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+// AdminOption is a generic type not to be used directly.
+//
+// See CreateTopicsAdminOption et al.
+type AdminOption interface {
+	apply(cOptions *C.rd_kafka_AdminOptions_t) error
+}
+
+func adminOptionsSetup(h *handle, opType C.rd_kafka_admin_op_t, options []AdminOption) (*C.rd_kafka_AdminOptions_t, error) {
+
+	cOptions := C.rd_kafka_AdminOptions_new(h.rk, opType)
+	for _, opt := range options {
+		if opt == nil {
+			continue
+		}
+		err := opt.apply(cOptions)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return cOptions, nil
+}
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/api.html b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/api.html
new file mode 100644
index 00000000..9caf2ec9
--- /dev/null
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/api.html
@@ -0,0 +1,8165 @@
+ kafka - Go Documentation Server
+ Package kafka
+
+ import "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+
+ Overview
+

+ Package kafka provides high-level Apache Kafka producers and consumers
+ using bindings on top of the librdkafka C library.
+
+ High-level Consumer
+
+ * Decide if you want to read messages and events by calling `.Poll()` or
+ the deprecated option of using the `.Events()` channel. (If you want to use
+ the `.Events()` channel then set `"go.events.channel.enable": true`.)
+
+ * Create a Consumer with `kafka.NewConsumer()` providing at
+ least the `bootstrap.servers` and `group.id` configuration properties.
+
+ * Call `.Subscribe()` (or `.SubscribeTopics()` to subscribe to multiple topics)
+ to join the group with the specified subscription set.
+ Subscriptions are atomic; calling `.Subscribe*()` again will leave
+ the group and rejoin with the new set of topics.
+
+ * Start reading events and messages from either the `.Events` channel
+ or by calling `.Poll()`.
+
+ * When the group has rebalanced, each client member is assigned a
+ (sub-)set of topic+partitions.
+ By default the consumer will start fetching messages for its assigned
+ partitions at this point, but your application may enable rebalance
+ events to get an insight into what the assigned partitions were,
+ as well as to set the initial offsets. To do this you need to pass
+ `"go.application.rebalance.enable": true` to the `NewConsumer()` call
+ mentioned above. You will (eventually) see a `kafka.AssignedPartitions` event
+ with the assigned partition set. You can optionally modify the initial
+ offsets (they default to stored offsets; if there are no previously stored
+ offsets they fall back to `"auto.offset.reset"`,
+ which defaults to the `latest` message) and then call `.Assign(partitions)`
+ to start consuming. If you don't need to modify the initial offsets you will
+ not need to call `.Assign()`; the client will do so automatically for you if
+ you don't, unless you are using the channel-based consumer, in which case
+ you MUST call `.Assign()` when receiving the `AssignedPartitions` and
+ `RevokedPartitions` events.
+
+ * As messages are fetched they will be made available on either the
+ `.Events` channel or by calling `.Poll()`; look for event type `*kafka.Message`.
+
+ * Handle messages, events and errors to your liking.
+
+ * When you are done consuming, call `.Close()` to commit final offsets
+ and leave the consumer group.
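+
+ For illustration, a minimal sketch of this flow (broker address, group id and
+ topic name are placeholders; fmt/time imports are omitted and error handling
+ is reduced to prints and panics):
+
+    c, err := kafka.NewConsumer(&kafka.ConfigMap{
+        "bootstrap.servers": "localhost:9092",
+        "group.id":          "myGroup",
+        "auto.offset.reset": "earliest",
+    })
+    if err != nil {
+        panic(err)
+    }
+    defer c.Close()
+
+    c.SubscribeTopics([]string{"myTopic"}, nil)
+    for {
+        // ReadMessage is a convenience wrapper around Poll() that returns
+        // only messages or errors.
+        msg, err := c.ReadMessage(100 * time.Millisecond)
+        if err != nil {
+            if kerr, ok := err.(kafka.Error); ok && kerr.IsTimeout() {
+                continue // no message arrived within the timeout
+            }
+            fmt.Printf("consumer error: %v\n", err)
+            continue
+        }
+        fmt.Printf("message on %s: %s\n", msg.TopicPartition, string(msg.Value))
+    }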

+ Producer
+
+ * Create a Producer with `kafka.NewProducer()` providing at least
+ the `bootstrap.servers` configuration properties.
+
+ * Messages may now be produced either by sending a `*kafka.Message`
+ on the `.ProduceChannel` or by calling `.Produce()`.
+
+ * Producing is an asynchronous operation, so the client notifies the application
+ of per-message produce success or failure through something called delivery reports.
+ Delivery reports are by default emitted on the `.Events()` channel as `*kafka.Message`
+ and you should check `msg.TopicPartition.Error` for `nil` to find out if the message
+ was successfully delivered or not.
+ It is also possible to direct delivery reports to alternate channels
+ by providing a non-nil `chan Event` channel to `.Produce()`.
+ If no delivery reports are wanted they can be completely disabled by
+ setting configuration property `"go.delivery.reports": false`.
+
+ * When you are done producing messages you will need to make sure all messages
+ are indeed delivered to the broker (or failed); remember that this is
+ an asynchronous client, so some of your messages may be lingering in internal
+ channels or transmission queues.
+ To do this you can either keep track of the messages you've produced
+ and wait for their corresponding delivery reports, or call the convenience
+ function `.Flush()` that will block until all message deliveries are done
+ or the provided timeout elapses.
+
+ * Finally call `.Close()` to decommission the producer.
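+
+ A correspondingly minimal producer sketch (topic name and broker address are
+ placeholders; a real application would also handle events on the `.Events()`
+ channel):
+
+    p, err := kafka.NewProducer(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
+    if err != nil {
+        panic(err)
+    }
+    defer p.Close()
+
+    topic := "myTopic"
+    deliveryChan := make(chan kafka.Event, 1)
+    err = p.Produce(&kafka.Message{
+        TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+        Value:          []byte("hello"),
+    }, deliveryChan)
+    if err != nil {
+        panic(err)
+    }
+
+    // The delivery report arrives asynchronously as a *kafka.Message.
+    m := (<-deliveryChan).(*kafka.Message)
+    if m.TopicPartition.Error != nil {
+        fmt.Printf("delivery failed: %v\n", m.TopicPartition.Error)
+    }
+    p.Flush(15 * 1000)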

+ Transactional producer API
+
+ The transactional producer operates on top of the idempotent producer,
+ and provides full exactly-once semantics (EOS) for Apache Kafka when used
+ with the transaction aware consumer (`isolation.level=read_committed`).
+
+ A producer instance is configured for transactions by setting the
+ `transactional.id` to an identifier unique for the application. This
+ id will be used to fence stale transactions from previous instances of
+ the application, typically following an outage or crash.
+
+ After creating the transactional producer instance using `NewProducer()`
+ the transactional state must be initialized by calling
+ `InitTransactions()`. This is a blocking call that will
+ acquire a runtime producer id from the transaction coordinator broker
+ as well as abort any stale transactions and fence any still running producer
+ instances with the same `transactional.id`.
+
+ Once transactions are initialized the application may begin a new
+ transaction by calling `BeginTransaction()`.
+ A producer instance may only have one single on-going transaction.
+
+ Any messages produced after the transaction has been started will
+ belong to the ongoing transaction and will be committed or aborted
+ atomically.
+ It is not permitted to produce messages outside a transaction
+ boundary, e.g., before `BeginTransaction()` or after `CommitTransaction()`,
+ `AbortTransaction()`, or if the current transaction has failed.
+
+ If consumed messages are used as input to the transaction, the consumer
+ instance must be configured with `enable.auto.commit` set to `false`.
+ To commit the consumed offsets along with the transaction, pass the
+ list of consumed partitions and the last offset processed + 1 to
+ `SendOffsetsToTransaction()` prior to committing the transaction.
+ This allows an aborted transaction to be restarted using the previously
+ committed offsets.
+
+ To commit the produced messages, and any consumed offsets, to the
+ current transaction, call `CommitTransaction()`.
+ This call will block until the transaction has been fully committed or
+ failed (typically due to fencing by a newer producer instance).
+
+ Alternatively, if processing fails, or an abortable transaction error is
+ raised, the transaction needs to be aborted by calling
+ `AbortTransaction()`, which marks any produced messages and
+ offset commits as aborted.
+
+ After the current transaction has been committed or aborted, a new
+ transaction may be started by calling `BeginTransaction()` again.
+
+ Retriable errors:
+ Some error cases allow the attempted operation to be retried; this is
+ indicated by the error object having the retriable flag set, which can
+ be detected by calling `err.(kafka.Error).IsRetriable()`.
+ When this flag is set the application may retry the operation immediately
+ or preferably after a shorter grace period (to avoid busy-looping).
+ Retriable errors include timeouts, broker transport failures, etc.
+
+ Abortable errors:
+ An ongoing transaction may fail permanently due to various errors,
+ such as the transaction coordinator becoming unavailable, write failures to the
+ Apache Kafka log, under-replicated partitions, etc.
+ At this point the producer application must abort the current transaction
+ using `AbortTransaction()` and optionally start a new transaction
+ by calling `BeginTransaction()`.
+ Whether an error is abortable or not is detected by calling
+ `err.(kafka.Error).TxnRequiresAbort()` on the returned error object.
+
+ Fatal errors:
+ While the underlying idempotent producer will typically only raise
+ fatal errors for unrecoverable cluster errors where the idempotency
+ guarantees can't be maintained, most of these are treated as abortable by
+ the transactional producer since transactions may be aborted and retried
+ in their entirety.
+ The transactional producer on the other hand introduces a set of additional
+ fatal errors which the application needs to handle by shutting down the
+ producer and terminating. There is no way for a producer instance to recover
+ from fatal errors.
+ Whether an error is fatal or not is detected by calling
+ `err.(kafka.Error).IsFatal()` on the returned error object or by checking
+ the global `GetFatalError()`.
+
+ Handling of other errors:
+ For errors that have neither the retriable, abortable, nor fatal flag set
+ it is not always obvious how to handle them. While some of these errors
+ may be indicative of bugs in the application code, such as when
+ an invalid parameter is passed to a method, other errors might originate
+ from the broker and be passed through as-is to the application.
+ The general recommendation is to treat these errors, which have
+ neither the retriable nor abortable flags set, as fatal.
+
+ Error handling example:

+retry:
+
+   err := producer.CommitTransaction(...)
+   if err == nil {
+       return nil
+   } else if err.(kafka.Error).TxnRequiresAbort() {
+       do_abort_transaction_and_reset_inputs()
+   } else if err.(kafka.Error).IsRetriable() {
+       goto retry
+   } else { // treat all other errors as fatal errors
+       panic(err)
+   }
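+
+ For illustration, a compact sketch of one transactional cycle (`ctx`, the
+ producer `p`, and the consumed `offsets` and `groupMetadata` are assumed to
+ exist; real code should apply the retry/abort pattern shown above):
+
+    if err := p.InitTransactions(ctx); err != nil {
+        panic(err)
+    }
+    if err := p.BeginTransaction(); err != nil {
+        panic(err)
+    }
+    // ... p.Produce() the messages belonging to this transaction ...
+    if err := p.SendOffsetsToTransaction(ctx, offsets, groupMetadata); err != nil {
+        p.AbortTransaction(ctx) // restart from the last committed offsets
+    } else if err := p.CommitTransaction(ctx); err != nil {
+        p.AbortTransaction(ctx)
+    }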
+
+

+ Events
+
+ Apart from emitting messages and delivery reports the client also communicates
+ with the application through a number of different event types.
+ An application may choose to handle or ignore these events.
+
+ Consumer events
+
+ * `*kafka.Message` - a fetched message.
+
+ * `AssignedPartitions` - The assigned partition set for this client following a rebalance.
+ Requires `go.application.rebalance.enable`.
+
+ * `RevokedPartitions` - The counterpart to `AssignedPartitions` following a rebalance.
+ `AssignedPartitions` and `RevokedPartitions` are symmetrical.
+ Requires `go.application.rebalance.enable`.
+
+ * `PartitionEOF` - Consumer has reached the end of a partition.
+ NOTE: The consumer will keep trying to fetch new messages for the partition.
+
+ * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled).
+
+ Producer events
+
+ * `*kafka.Message` - delivery report for produced message.
+ Check `.TopicPartition.Error` for delivery result.
+
+ Generic events for both Consumer and Producer
+
+ * `KafkaError` - client (error codes are prefixed with _) or broker error.
+ These errors are normally just informational since the
+ client will try its best to automatically recover (eventually).
+
+ * `OAuthBearerTokenRefresh` - retrieval of a new SASL/OAUTHBEARER token is required.
+ This event only occurs with sasl.mechanism=OAUTHBEARER.
+ Be sure to invoke SetOAuthBearerToken() on the Producer/Consumer/AdminClient
+ instance when a successful token retrieval is completed, otherwise be sure to
+ invoke SetOAuthBearerTokenFailure() to indicate that retrieval failed (or
+ if setting the token failed, which could happen if an extension doesn't meet
+ the required regular expression); invoking SetOAuthBearerTokenFailure() will
+ schedule a new event for 10 seconds later so another retrieval can be attempted.
+
+ Hint: If your application registers a signal notification
+ (signal.Notify), make sure the signals channel is buffered to avoid
+ possible complications with blocking Poll() calls.
+
+ Note: The Confluent Kafka Go client is safe for concurrent use.
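+
+ A minimal event loop dispatching on these event types might look as follows
+ (a sketch assuming a consumer `c` created with
+ `"go.application.rebalance.enable": true`):
+
+    for {
+        switch ev := c.Poll(100).(type) {
+        case *kafka.Message:
+            fmt.Printf("message on %s: %s\n", ev.TopicPartition, string(ev.Value))
+        case kafka.AssignedPartitions:
+            c.Assign(ev.Partitions) // optionally adjust initial offsets here
+        case kafka.RevokedPartitions:
+            c.Unassign()
+        case kafka.PartitionEOF:
+            fmt.Printf("reached end of %v\n", ev)
+        case kafka.Error:
+            fmt.Printf("error: %v\n", ev) // normally informational
+        case nil:
+            // Poll timed out without an event.
+        }
+    }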

+
+ Index
+
+ Constants
+ func LibraryVersion() (int, string)
+ func WriteErrorCodes(f *os.File)
+ type ACLBinding
+ type ACLBindingFilter
+ type ACLBindingFilters
+ type ACLBindings
+     func (a ACLBindings) Len() int
+     func (a ACLBindings) Less(i, j int) bool
+     func (a ACLBindings) Swap(i, j int)
+ type ACLOperation
+     func ACLOperationFromString(aclOperationString string) (ACLOperation, error)
+     func (o ACLOperation) String() string
+ type ACLPermissionType
+     func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error)
+     func (o ACLPermissionType) String() string
+ type AdminClient
+     func NewAdminClient(conf *ConfigMap) (*AdminClient, error)
+     func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error)
+     func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error)
+     func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)
+     func (a *AdminClient) AlterConsumerGroupOffsets(ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions, options ...AlterConsumerGroupOffsetsAdminOption) (acgor AlterConsumerGroupOffsetsResult, err error)
+     func (a *AdminClient) AlterUserScramCredentials(ctx context.Context, upsertions []UserScramCredentialUpsertion, deletions []UserScramCredentialDeletion, options ...AlterUserScramCredentialsAdminOption) (result AlterUserScramCredentialsResult, err error)
+     func (a *AdminClient) Close()
+     func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error)
+     func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error)
+     func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error)
+     func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error)
+     func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error)
+     func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error)
+     func (a *AdminClient) DeleteConsumerGroups(ctx context.Context, groups []string, options ...DeleteConsumerGroupsAdminOption) (result DeleteConsumerGroupsResult, err error)
+     func (a *AdminClient) DeleteRecords(ctx context.Context, recordsToDelete []TopicPartition, options ...DeleteRecordsAdminOption) (result DeleteRecordsResults, err error)
+     func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error)
+     func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error)
+     func (a *AdminClient) DescribeCluster(ctx context.Context, options ...DescribeClusterAdminOption) (result DescribeClusterResult, err error)
+     func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error)
+     func (a *AdminClient) DescribeConsumerGroups(ctx context.Context, groups []string, options ...DescribeConsumerGroupsAdminOption) (result DescribeConsumerGroupsResult, err error)
+     func (a *AdminClient) DescribeTopics(ctx context.Context, topics TopicCollection, options ...DescribeTopicsAdminOption) (result DescribeTopicsResult, err error)
+     func (a *AdminClient) DescribeUserScramCredentials(ctx context.Context, users []string, options ...DescribeUserScramCredentialsAdminOption) (result DescribeUserScramCredentialsResult, err error)
+     func (a *AdminClient) ElectLeaders(ctx context.Context, electLeaderRequest ElectLeadersRequest, options ...ElectLeadersAdminOption) (result ElectLeadersResult, err error)
+     func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+     func (a *AdminClient) IncrementalAlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)
+     func (a *AdminClient) IsClosed() bool
+     func (a *AdminClient) ListConsumerGroupOffsets(ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions, options ...ListConsumerGroupOffsetsAdminOption) (lcgor ListConsumerGroupOffsetsResult, err error)
+     func (a *AdminClient) ListConsumerGroups(ctx context.Context, options ...ListConsumerGroupsAdminOption) (result ListConsumerGroupsResult, err error)
+     func (a *AdminClient) ListOffsets(ctx context.Context, topicPartitionOffsets map[TopicPartition]OffsetSpec, options ...ListOffsetsAdminOption) (result ListOffsetsResult, err error)
+     func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
+     func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error
+     func (a *AdminClient) SetSaslCredentials(username, password string) error
+     func (a *AdminClient) String() string
+ type AdminOption
+ type AdminOptionIncludeAuthorizedOperations
+     func SetAdminOptionIncludeAuthorizedOperations(val bool) (ao AdminOptionIncludeAuthorizedOperations)
+ type AdminOptionIsolationLevel
+     func SetAdminIsolationLevel(isolationLevel IsolationLevel) (ao AdminOptionIsolationLevel)
+ type AdminOptionMatchConsumerGroupStates
+     func SetAdminMatchConsumerGroupStates(val []ConsumerGroupState) (ao AdminOptionMatchConsumerGroupStates)
+ type AdminOptionMatchConsumerGroupTypes
+     func SetAdminMatchConsumerGroupTypes(val []ConsumerGroupType) (ao AdminOptionMatchConsumerGroupTypes)
+ type AdminOptionOperationTimeout
+     func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout)
+ type AdminOptionRequestTimeout
+     func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout)
+ type AdminOptionRequireStableOffsets
+     func SetAdminRequireStableOffsets(val bool) (ao AdminOptionRequireStableOffsets)
+ type AdminOptionValidateOnly
+     func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly)
+ type AlterConfigOpType
+     func (o AlterConfigOpType) String() string
+ type AlterConfigsAdminOption
+ type AlterConsumerGroupOffsetsAdminOption
+ type AlterConsumerGroupOffsetsResult
+ type AlterOperation
+     func (o AlterOperation) String() string
+ type AlterUserScramCredentialsAdminOption
+ type AlterUserScramCredentialsResult
+ type AssignedPartitions
+     func (e AssignedPartitions) String() string
+ type BrokerMetadata
+ type ConfigEntry
+     func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry
+     func StringMapToIncrementalConfigEntries(stringMap map[string]string, operationMap map[string]AlterConfigOpType) []ConfigEntry
+     func (c ConfigEntry) String() string
+ type ConfigEntryResult
+     func (c ConfigEntryResult) String() string
+ type ConfigMap
+     func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error)
+     func (m ConfigMap) Set(kv string) error
+     func (m ConfigMap) SetKey(key string, value ConfigValue) error
+ type ConfigResource
+     func (c ConfigResource) String() string
+ type ConfigResourceResult
+     func (c ConfigResourceResult) String() string
+ type ConfigSource
+     func (t ConfigSource) String() string
+ type ConfigValue
+ type Consumer
+     func NewConsumer(conf *ConfigMap) (*Consumer, error)
+     func (c *Consumer) Assign(partitions []TopicPartition) (err error)
+     func (c *Consumer) Assignment() (partitions []TopicPartition, err error)
+     func (c *Consumer) AssignmentLost() bool
+     func (c *Consumer) Close() (err error)
+     func (c *Consumer) Commit() ([]TopicPartition, error)
+     func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error)
+     func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error)
+     func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
+     func (c *Consumer) Events() chan Event
+     func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error)
+     func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+     func (c *Consumer) GetRebalanceProtocol() string
+     func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error)
+     func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error)
+     func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error)
+     func (c *Consumer) IsClosed() bool
+     func (c *Consumer) Logs() chan LogEvent
+     func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
+     func (c *Consumer) Pause(partitions []TopicPartition) (err error)
+     func (c *Consumer) Poll(timeoutMs int) (event Event)
+     func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error)
+     func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
+     func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error)
+     func (c *Consumer) Resume(partitions []TopicPartition) (err error)
+     func (c *Consumer) Seek(partition TopicPartition, ignoredTimeoutMs int) error
+     func (c *Consumer) SeekPartitions(partitions []TopicPartition) ([]TopicPartition, error)
+     func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
+     func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error
+     func (c *Consumer) SetSaslCredentials(username, password string) error
+     func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error)
+     func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error)
+     func (c *Consumer) String() string
+     func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error
+     func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error)
+     func (c *Consumer) Subscription() (topics []string, err error)
+     func (c *Consumer) Unassign() (err error)
+     func (c *Consumer) Unsubscribe() (err error)
+ type ConsumerGroupDescription
+ type ConsumerGroupListing
+ type ConsumerGroupMetadata
+     func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error)
+ type ConsumerGroupResult
+     func (g ConsumerGroupResult) String() string
+ type ConsumerGroupState
+     func ConsumerGroupStateFromString(stateString string) (ConsumerGroupState, error)
+     func (t ConsumerGroupState) String() string
+ type ConsumerGroupTopicPartitions
+     func (gtp ConsumerGroupTopicPartitions) String() string
+ type ConsumerGroupType
+     func ConsumerGroupTypeFromString(typeString string) ConsumerGroupType
+     func (t ConsumerGroupType) String() string
+ type CreateACLResult
+ type CreateACLsAdminOption
+ type CreatePartitionsAdminOption
+ type CreateTopicsAdminOption
+ type DeleteACLsAdminOption
+ type DeleteACLsResult
+ type DeleteConsumerGroupsAdminOption
+ type DeleteConsumerGroupsResult
+ type DeleteRecordsAdminOption
+ type DeleteRecordsResult
+ type DeleteRecordsResults
+ type DeleteTopicsAdminOption
+ type DeletedRecords
+ type DescribeACLsAdminOption
+ type DescribeACLsResult
+ type DescribeClusterAdminOption
+ type DescribeClusterResult
+ type DescribeConfigsAdminOption
+ type DescribeConsumerGroupsAdminOption
+ type DescribeConsumerGroupsResult
+ type DescribeTopicsAdminOption
+ type DescribeTopicsResult
+ type DescribeUserScramCredentialsAdminOption
+ type DescribeUserScramCredentialsResult
+ type ElectLeadersAdminOption
+ type ElectLeadersRequest
+     func NewElectLeadersRequest(electionType ElectionType, partitions []TopicPartition) ElectLeadersRequest
+ type ElectLeadersResult
+ type ElectionType
+     func ElectionTypeFromString(electionTypeString string) (ElectionType, error)
+ type Error
+     func NewError(code ErrorCode, str string, fatal bool) (err Error)
+     func (e Error) Code() ErrorCode
+     func (e Error) Error() string
+     func (e Error) IsFatal() bool
+     func (e Error) IsRetriable() bool
+     func (e Error) IsTimeout() bool
+     func (e Error) String() string
+     func (e Error) TxnRequiresAbort() bool
+ type ErrorCode
+     func (c ErrorCode) String() string
+ type Event
+ type Handle
+ type Header
+     func (h Header) String() string
+ type IsolationLevel
+ type ListConsumerGroupOffsetsAdminOption
+ type ListConsumerGroupOffsetsResult
+ type ListConsumerGroupsAdminOption
+ type ListConsumerGroupsResult
+ type ListOffsetsAdminOption
+ type ListOffsetsResult
+ type ListOffsetsResultInfo
+ type LogEvent
+     func (logEvent LogEvent) String() string
+ type MemberAssignment
+ type MemberDescription
+ type Message
+     func (m *Message) String() string
+ type Metadata
+ type MockCluster
+     func NewMockCluster(brokerCount int) (*MockCluster, error)
+     func (mc *MockCluster) BootstrapServers() string
+     func (mc *MockCluster) Close()
+     func (mc *MockCluster) CreateTopic(topic string, partitions, replicationFactor int) error
+     func (mc *MockCluster) SetBrokerDown(brokerID int) error
+     func (mc *MockCluster) SetBrokerUp(brokerID int) error
+     func (mc *MockCluster) SetRoundtripDuration(brokerID int, duration time.Duration) error
+ type Node
+     func (n Node) String() string
+ type OAuthBearerToken
+ type OAuthBearerTokenRefresh
+     func (o OAuthBearerTokenRefresh) String() string
+ type Offset
+     func NewOffset(offset interface{}) (Offset, error)
+     func OffsetTail(relativeOffset Offset) Offset
+     func (o *Offset) Set(offset interface{}) error
+     func (o Offset) String() string
+ type OffsetSpec
+     func NewOffsetSpecForTimestamp(timestamp int64) OffsetSpec
+ type OffsetsCommitted
+     func (o OffsetsCommitted) String() string
+ type PartitionEOF
+     func (p PartitionEOF) String() string
+ type PartitionMetadata
+ type PartitionsSpecification
+ type Producer
+     func NewProducer(conf *ConfigMap) (*Producer, error)
+     func (p *Producer) AbortTransaction(ctx context.Context) error
+     func (p *Producer) BeginTransaction() error
+     func (p *Producer) Close()
+     func (p *Producer) CommitTransaction(ctx context.Context) error
+     func (p *Producer) Events() chan Event
+     func (p *Producer) Flush(timeoutMs int) int
+     func (p *Producer) GetFatalError() error
+     func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)
+     func (p *Producer) InitTransactions(ctx context.Context) error
+     func (p *Producer) IsClosed() bool
+     func (p *Producer) Len() int
+     func (p *Producer) Logs() chan LogEvent
+     func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)
+     func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error
+     func (p *Producer) ProduceChannel() chan *Message
+     func (p *Producer) Purge(flags int) error
+     func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
+     func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error
+     func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
+     func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error
+     func (p *Producer) SetSaslCredentials(username, password string) error
+     func (p *Producer) String() string
+     func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode
+ type RebalanceCb
+ type ResourcePatternType
+     func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error)
+     func (t ResourcePatternType) String() string
+ type ResourceType
+     func ResourceTypeFromString(typeString string) (ResourceType, error)
+     func (t ResourceType) String() string
+ type RevokedPartitions
+     func (e RevokedPartitions) String() string
+ type ScramCredentialInfo
+ type ScramMechanism
+     func ScramMechanismFromString(mechanism string) (ScramMechanism, error)
+     func (o ScramMechanism) String() string
+ type Stats
+     func (e Stats) String() string
+ type TimestampType
+     func (t TimestampType) String() string
+ type TopicCollection
+     func NewTopicCollectionOfTopicNames(names []string) TopicCollection
+ type TopicDescription
+ type TopicMetadata
+ type TopicPartition
+     func (p TopicPartition) String() string
+ type TopicPartitionInfo
+ type TopicPartitions
+     func (tps TopicPartitions) Len() int
+     func (tps TopicPartitions) Less(i, j int) bool
+     func (tps TopicPartitions) Swap(i, j int)
+ type TopicResult
+     func (t TopicResult) String() string
+ type TopicSpecification
+ type UUID
+     func (uuid UUID) GetLeastSignificantBits() int64
+     func (uuid UUID) GetMostSignificantBits() int64
+     func (uuid UUID) String() string
+ type UserScramCredentialDeletion
+ type UserScramCredentialUpsertion
+ type UserScramCredentialsDescription

+ Package files
+
+ 00version.go adminapi.go adminoptions.go build_glibc_linux_amd64.go config.go consumer.go context.go error.go error_gen.go event.go generated_errors.go handle.go header.go kafka.go log.go message.go metadata.go misc.go mockcluster.go offset.go producer.go time.go
+

+ Constants
+
+ const (
+    // PurgeInFlight purges messages in-flight to or from the broker.
+    // Purging these messages will void any future acknowledgements from the
+    // broker, making it impossible for the application to know if these
+    // messages were successfully delivered or not.
+    // Retrying these messages may lead to duplicates.
+    PurgeInFlight = int(C.RD_KAFKA_PURGE_F_INFLIGHT)
+
+    // PurgeQueue Purge messages in internal queues.
+    PurgeQueue = int(C.RD_KAFKA_PURGE_F_QUEUE)
+
+    // PurgeNonBlocking Don't wait for background thread queue purging to finish.
+    PurgeNonBlocking = int(C.RD_KAFKA_PURGE_F_NON_BLOCKING)
+)
+
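+
+ A short usage sketch (assuming an existing Producer `p`); the purge flags
+ may be OR'ed together:
+
+    if err := p.Purge(kafka.PurgeQueue | kafka.PurgeInFlight | kafka.PurgeNonBlocking); err != nil {
+        fmt.Printf("purge failed: %v\n", err)
+    }
+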
+ const (
+    // AlterOperationSet sets/overwrites the configuration setting.
+    AlterOperationSet = iota
+ )
+
+ LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client
+
+ const LibrdkafkaLinkInfo = "static glibc_linux_amd64 from librdkafka-static-bundle-v2.6.1.tgz"
+
+ OffsetBeginning represents the earliest offset (logical)
+
+ const OffsetBeginning = Offset(C.RD_KAFKA_OFFSET_BEGINNING)
+
+ OffsetEnd represents the latest offset (logical)
+
+ const OffsetEnd = Offset(C.RD_KAFKA_OFFSET_END)
+
+ OffsetInvalid represents an invalid/unspecified offset
+
+ const OffsetInvalid = Offset(C.RD_KAFKA_OFFSET_INVALID)
+
+ OffsetStored represents a stored offset
+
+ const OffsetStored = Offset(C.RD_KAFKA_OFFSET_STORED)
+
+ PartitionAny represents any partition (for partitioning), or unspecified value (for all other cases)
+
+ const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA)
+

+ func LibraryVersion
+
+ func LibraryVersion() (int, string)
+
+ LibraryVersion returns the underlying librdkafka library version as a (version_int, version_str) tuple.

+ func WriteErrorCodes
+
+ func WriteErrorCodes(f *os.File)
+
+ WriteErrorCodes writes Go error code constants to file from the librdkafka error codes. This function is not intended for public use.

+ type ACLBinding
+
+ ACLBinding specifies the operation and permission type for a specific principal over one or more resources of the same type. Used by `AdminClient.CreateACLs`, returned by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`.
+
+ type ACLBinding struct {
+    Type ResourceType // The resource type.
+    // The resource name, which depends on the resource type.
+    // For ResourceBroker the resource name is the broker id.
+    Name                string
+    ResourcePatternType ResourcePatternType // The resource pattern, relative to the name.
+    Principal           string              // The principal this ACLBinding refers to.
+    Host                string              // The host that the call is allowed to come from.
+    Operation           ACLOperation        // The operation/s specified by this binding.
+    PermissionType      ACLPermissionType   // The permission type for the specified operation.
+}
+
+

+ type ACLBindingFilter
+
+ ACLBindingFilter specifies a filter used to return a list of ACL bindings matching some or all of its attributes. Used by `AdminClient.DescribeACLs` and `AdminClient.DeleteACLs`.
+
+ type ACLBindingFilter = ACLBinding
+

+ type ACLBindingFilters
+
+ ACLBindingFilters is a slice of ACLBindingFilter that also implements the sort interface
+
+ type ACLBindingFilters []ACLBindingFilter
+

+ type ACLBindings
+
+ ACLBindings is a slice of ACLBinding that also implements the sort interface
+
+ type ACLBindings []ACLBinding
+

+ func (ACLBindings) Len
+
+ func (a ACLBindings) Len() int
+

+ func (ACLBindings) Less
+
+ func (a ACLBindings) Less(i, j int) bool
+

+ func (ACLBindings) Swap
+
+ func (a ACLBindings) Swap(i, j int)
+

+ type ACLOperation
+
+ ACLOperation enumerates the different types of ACL operation.
+
+ type ACLOperation int
+
+ const (
+    // ACLOperationUnknown represents an unknown or unset operation
+    ACLOperationUnknown ACLOperation = C.RD_KAFKA_ACL_OPERATION_UNKNOWN
+    // ACLOperationAny in a filter, matches any ACLOperation
+    ACLOperationAny ACLOperation = C.RD_KAFKA_ACL_OPERATION_ANY
+    // ACLOperationAll represents all the operations
+    ACLOperationAll ACLOperation = C.RD_KAFKA_ACL_OPERATION_ALL
+    // ACLOperationRead a read operation
+    ACLOperationRead ACLOperation = C.RD_KAFKA_ACL_OPERATION_READ
+    // ACLOperationWrite represents a write operation
+    ACLOperationWrite ACLOperation = C.RD_KAFKA_ACL_OPERATION_WRITE
+    // ACLOperationCreate represents a create operation
+    ACLOperationCreate ACLOperation = C.RD_KAFKA_ACL_OPERATION_CREATE
+    // ACLOperationDelete represents a delete operation
+    ACLOperationDelete ACLOperation = C.RD_KAFKA_ACL_OPERATION_DELETE
+    // ACLOperationAlter represents an alter operation
+    ACLOperationAlter ACLOperation = C.RD_KAFKA_ACL_OPERATION_ALTER
+    // ACLOperationDescribe represents a describe operation
+    ACLOperationDescribe ACLOperation = C.RD_KAFKA_ACL_OPERATION_DESCRIBE
+    // ACLOperationClusterAction represents a cluster action operation
+    ACLOperationClusterAction ACLOperation = C.RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION
+    // ACLOperationDescribeConfigs represents a describe configs operation
+    ACLOperationDescribeConfigs ACLOperation = C.RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS
+    // ACLOperationAlterConfigs represents an alter configs operation
+    ACLOperationAlterConfigs ACLOperation = C.RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS
+    // ACLOperationIdempotentWrite represents an idempotent write operation
+    ACLOperationIdempotentWrite ACLOperation = C.RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE
+)
+

+ func ACLOperationFromString
+
+ func ACLOperationFromString(aclOperationString string) (ACLOperation, error)
+
+ ACLOperationFromString translates an ACL operation name to an ACLOperation value.

+ func (ACLOperation) String
+
+ func (o ACLOperation) String() string
+
+ String returns the human-readable representation of an ACLOperation

+ type ACLPermissionType
+
+ ACLPermissionType enumerates the different types of ACL permission types.
+
+ type ACLPermissionType int
+
+ const (
+    // ACLPermissionTypeUnknown represents an unknown ACLPermissionType
+    ACLPermissionTypeUnknown ACLPermissionType = C.RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN
+    // ACLPermissionTypeAny in a filter, matches any ACLPermissionType
+    ACLPermissionTypeAny ACLPermissionType = C.RD_KAFKA_ACL_PERMISSION_TYPE_ANY
+    // ACLPermissionTypeDeny disallows access
+    ACLPermissionTypeDeny ACLPermissionType = C.RD_KAFKA_ACL_PERMISSION_TYPE_DENY
+    // ACLPermissionTypeAllow grants access
+    ACLPermissionTypeAllow ACLPermissionType = C.RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW
+)
+

+ func ACLPermissionTypeFromString
+
+ func ACLPermissionTypeFromString(aclPermissionTypeString string) (ACLPermissionType, error)
+
+ ACLPermissionTypeFromString translates an ACL permission type name to an ACLPermissionType value.

+ func (ACLPermissionType) String
+
+ func (o ACLPermissionType) String() string
+
+ String returns the human-readable representation of an ACLPermissionType

+ type AdminClient
+
+ AdminClient is derived from an existing Producer or Consumer
+
+ type AdminClient struct {
+    // contains filtered or unexported fields
+}
+
+

+ func NewAdminClient
+
+ func NewAdminClient(conf *ConfigMap) (*AdminClient, error)
+
+ NewAdminClient creates a new AdminClient instance with a new underlying client instance.
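+
+ A minimal creation sketch (the broker address is a placeholder):
+
+    a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": "localhost:9092"})
+    if err != nil {
+        panic(err)
+    }
+    defer a.Close()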

+ func NewAdminClientFromConsumer
+
+ func NewAdminClientFromConsumer(c *Consumer) (a *AdminClient, err error)
+
+ NewAdminClientFromConsumer derives a new AdminClient from an existing Consumer instance. The AdminClient will use the same configuration and connections as the parent instance.

+ func NewAdminClientFromProducer
+
+ func NewAdminClientFromProducer(p *Producer) (a *AdminClient, err error)
+
+ NewAdminClientFromProducer derives a new AdminClient from an existing Producer instance. The AdminClient will use the same configuration and connections as the parent instance.

+ func (*AdminClient) AlterConfigs
+
+ func (a *AdminClient) AlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)
+
+ AlterConfigs alters/updates cluster resource configuration.
+
+ Updates are not transactional, so they may succeed for a subset of the provided resources while others fail. The configuration for a particular resource is updated atomically, replacing values using the provided ConfigEntrys and reverting unspecified ConfigEntrys to their default values.
+
+ Requires broker version >=0.11.0.0
+
+ AlterConfigs will replace all existing configuration for the provided resources with the new configuration given, reverting all other configuration to their default values.
+
+ Multiple resources and resource types may be set, but at most one resource of type ResourceBroker is allowed per call since these resource requests must be sent to the broker specified in the resource.
+
+ Deprecated: AlterConfigs is deprecated in favour of IncrementalAlterConfigs.

+ func (*AdminClient) AlterConsumerGroupOffsets
+
+ func (a *AdminClient) AlterConsumerGroupOffsets(
+    ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions,
+    options ...AlterConsumerGroupOffsetsAdminOption) (acgor AlterConsumerGroupOffsetsResult, err error)
+
+ AlterConsumerGroupOffsets alters the offsets for topic partition(s) for consumer group(s).
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `groupsPartitions` - a slice of ConsumerGroupTopicPartitions, each element of which has the id of a consumer group, and a slice of the TopicPartitions we need to alter the offsets for. Currently, the size of `groupsPartitions` has to be exactly one.
+ * `options` - AlterConsumerGroupOffsetsAdminOption options.
+
+ Returns an AlterConsumerGroupOffsetsResult, containing a slice of ConsumerGroupTopicPartitions corresponding to the input slice, plus an error that is not `nil` for client level errors. Individual TopicPartitions inside each of the ConsumerGroupTopicPartitions should also be checked for errors. This will succeed at the partition level only if the group is not actively subscribed to the corresponding topic(s).
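+
+ For illustration, a sketch resetting one partition's committed offset for a
+ group (reusing the AdminClient `a` and a `ctx` as above; group, topic and
+ offset are placeholders, and the Group/Partitions field names of
+ ConsumerGroupTopicPartitions are assumptions not shown in this excerpt):
+
+    topic := "myTopic"
+    res, err := a.AlterConsumerGroupOffsets(ctx, []kafka.ConsumerGroupTopicPartitions{
+        {
+            Group: "myGroup", // assumed field name
+            Partitions: []kafka.TopicPartition{
+                {Topic: &topic, Partition: 0, Offset: 42},
+            },
+        },
+    })
+    if err != nil {
+        panic(err)
+    }
+    fmt.Printf("%v\n", res) // check individual TopicPartitions for errors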

+ func (*AdminClient) AlterUserScramCredentials
+
+ func (a *AdminClient) AlterUserScramCredentials(
+    ctx context.Context, upsertions []UserScramCredentialUpsertion, deletions []UserScramCredentialDeletion,
+    options ...AlterUserScramCredentialsAdminOption) (result AlterUserScramCredentialsResult, err error)
+
+ AlterUserScramCredentials alters SASL/SCRAM credentials. The pair (user, mechanism) must be unique among upsertions and deletions.
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `upsertions` - a slice of user credential upsertions
+ * `deletions` - a slice of user credential deletions
+ * `options` - AlterUserScramCredentialsAdminOption options.
+
+ Returns a map from user name to the corresponding Error, with error code ErrNoError when the request succeeded.

+ func (*AdminClient) Close
+
+ func (a *AdminClient) Close()
+
+ Close an AdminClient instance.

+ func (*AdminClient) ClusterID
+
+ func (a *AdminClient) ClusterID(ctx context.Context) (clusterID string, err error)
+
+ ClusterID returns the cluster ID as reported in broker metadata.
+
+ Note on cancellation: Although the underlying C function respects the timeout, it currently cannot be manually cancelled. That means manually cancelling the context will block until the C function call returns.
+
+ Requires broker version >= 0.10.0.

+ func (*AdminClient) ControllerID
+
+ func (a *AdminClient) ControllerID(ctx context.Context) (controllerID int32, err error)
+
+ ControllerID returns the broker ID of the current controller as reported in broker metadata.
+
+ Note on cancellation: Although the underlying C function respects the timeout, it currently cannot be manually cancelled. That means manually cancelling the context will block until the C function call returns.
+
+ Requires broker version >= 0.10.0.

+ func (*AdminClient) CreateACLs
+
+ func (a *AdminClient) CreateACLs(ctx context.Context, aclBindings ACLBindings, options ...CreateACLsAdminOption) (result []CreateACLResult, err error)
+
+ CreateACLs creates one or more ACL bindings.
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `aclBindings` - A slice of ACL binding specifications to create.
+ * `options` - Create ACLs options
+
+ Returns a slice of CreateACLResult with an ErrNoError ErrorCode when the operation was successful, plus an error that is not nil for client level errors.
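+
+ For illustration, a sketch granting a principal read access to a topic
+ (names are placeholders; the ACLBinding fields are described under type
+ ACLBinding above, and ResourceTopic/ResourcePatternTypeLiteral are assumed
+ constants of this package):
+
+    results, err := a.CreateACLs(ctx, kafka.ACLBindings{
+        {
+            Type:                kafka.ResourceTopic, // assumed constant
+            Name:                "myTopic",
+            ResourcePatternType: kafka.ResourcePatternTypeLiteral, // assumed constant
+            Principal:           "User:alice",
+            Host:                "*",
+            Operation:           kafka.ACLOperationRead,
+            PermissionType:      kafka.ACLPermissionTypeAllow,
+        },
+    })
+    if err != nil {
+        panic(err)
+    }
+    fmt.Printf("%v\n", results) // each CreateACLResult should be checked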

+ func (*AdminClient) CreatePartitions
+
+ func (a *AdminClient) CreatePartitions(ctx context.Context, partitions []PartitionsSpecification, options ...CreatePartitionsAdminOption) (result []TopicResult, err error)
+
+ CreatePartitions creates additional partitions for topics.

+ func (*AdminClient) CreateTopics
+
+ func (a *AdminClient) CreateTopics(ctx context.Context, topics []TopicSpecification, options ...CreateTopicsAdminOption) (result []TopicResult, err error)
+
+ CreateTopics creates topics in the cluster.
+
+ The list of TopicSpecification objects defines the per-topic partition count, replicas, etc.
+
+ Topic creation is non-atomic and may succeed for some topics but fail for others; make sure to check the result for topic-specific errors.
+
+ Note: TopicSpecification is analogous to NewTopic in the Java Topic Admin API.
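+
+ For illustration, a sketch of creating one topic (reusing `a` from above;
+ topic name, counts and timeouts are placeholders, and the TopicSpecification
+ field names Topic, NumPartitions and ReplicationFactor are assumptions not
+ shown in this excerpt):
+
+    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+    defer cancel()
+    results, err := a.CreateTopics(ctx,
+        []kafka.TopicSpecification{{
+            Topic:             "myTopic",
+            NumPartitions:     3,
+            ReplicationFactor: 1,
+        }},
+        kafka.SetAdminOperationTimeout(60*time.Second))
+    if err != nil {
+        panic(err)
+    }
+    for _, r := range results {
+        fmt.Printf("%v\n", r) // check each TopicResult for topic-specific errors
+    }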

+ func (*AdminClient) DeleteACLs
+
+ func (a *AdminClient) DeleteACLs(ctx context.Context, aclBindingFilters ACLBindingFilters, options ...DeleteACLsAdminOption) (result []DeleteACLsResult, err error)
+
+ DeleteACLs deletes ACL bindings matching one or more ACL binding filters.
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `aclBindingFilters` - a slice of ACL binding filters to match ACLs to delete. String attributes match exact values or any string if set to empty string. Enum attributes match exact values or any value if ending with `Any`. If `ResourcePatternType` is set to `ResourcePatternTypeMatch`, deletes ACL bindings with:
+     * `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name
+     * `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name
+     * `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name
+ * `options` - Delete ACLs options
+
+ Returns a slice of ACLBinding for each filter when the operation was successful, plus an error that is not `nil` for client level errors.

+ func (*AdminClient) DeleteConsumerGroups
+
+ func (a *AdminClient) DeleteConsumerGroups(
+    ctx context.Context,
+    groups []string, options ...DeleteConsumerGroupsAdminOption) (result DeleteConsumerGroupsResult, err error)
+
+ DeleteConsumerGroups deletes a batch of consumer groups.
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `groups` - A slice of groupIDs to delete.
+ * `options` - DeleteConsumerGroupsAdminOption options.
+
+ Returns a DeleteConsumerGroupsResult containing a slice of ConsumerGroupResult, with group-level errors (if any) contained inside, and an error that is not nil for client level errors.

+ func (*AdminClient) DeleteRecords
+
+ func (a *AdminClient) DeleteRecords(ctx context.Context,
+    recordsToDelete []TopicPartition,
+    options ...DeleteRecordsAdminOption) (result DeleteRecordsResults, err error)
+
+ DeleteRecords deletes records (messages) in topic partitions older than the offsets provided.
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `recordsToDelete` - A slice of TopicPartitions with the offset field set. For each partition, delete all messages up to but not including the specified offset. The offset could be set to kafka.OffsetEnd to delete all the messages in the partition.
+ * `options` - DeleteRecordsAdminOption options.
+
+ Returns a DeleteRecordsResults, which contains a slice of DeleteRecordsResult, each representing the result for one topic partition. Individual TopicPartitions inside the DeleteRecordsResult should be checked for errors. If successful, the DeletedRecords within the DeleteRecordsResult will be non-nil, and contain the low-watermark offset (smallest available offset of all live replicas).
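+
+ For illustration, a sketch deleting everything before offset 123 in
+ partition 0 of an assumed topic (reusing `a` and `ctx` from above):
+
+    topic := "myTopic"
+    results, err := a.DeleteRecords(ctx, []kafka.TopicPartition{
+        {Topic: &topic, Partition: 0, Offset: 123},
+    })
+    if err != nil {
+        panic(err)
+    }
+    // Inspect the per-partition results (and their DeletedRecords) for errors.
+    fmt.Printf("%v\n", results)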

+ func (*AdminClient) DeleteTopics
+
+ func (a *AdminClient) DeleteTopics(ctx context.Context, topics []string, options ...DeleteTopicsAdminOption) (result []TopicResult, err error)
+
+ DeleteTopics deletes a batch of topics.
+
+ This operation is not transactional and may succeed for a subset of topics while failing others. It may take several seconds after the DeleteTopics result returns success for all the brokers to become aware that the topics are gone. During this time, topic metadata and configuration may continue to return information about deleted topics.
+
+ Requires broker version >= 0.10.1.0

+ func (*AdminClient) DescribeACLs
+
+ func (a *AdminClient) DescribeACLs(ctx context.Context, aclBindingFilter ACLBindingFilter, options ...DescribeACLsAdminOption) (result *DescribeACLsResult, err error)
+
+ DescribeACLs matches ACL bindings by filter.
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `aclBindingFilter` - A filter with attributes that must match. String attributes match exact values or any string if set to empty string. Enum attributes match exact values or any value if ending with `Any`. If `ResourcePatternType` is set to `ResourcePatternTypeMatch`, returns ACL bindings with:
+     * `ResourcePatternTypeLiteral` pattern type with resource name equal to the given resource name
+     * `ResourcePatternTypeLiteral` pattern type with wildcard resource name that matches the given resource name
+     * `ResourcePatternTypePrefixed` pattern type with resource name that is a prefix of the given resource name
+ * `options` - Describe ACLs options
+
+ Returns a slice of ACLBindings when the operation was successful, plus an error that is not `nil` for client level errors.

+ func (*AdminClient) DescribeCluster
+
+ func (a *AdminClient) DescribeCluster(
+    ctx context.Context,
+    options ...DescribeClusterAdminOption) (result DescribeClusterResult, err error)
+
+ DescribeCluster describes the cluster.
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `options` - DescribeClusterAdminOption options.
+
+ Returns ClusterDescription, which contains the current cluster ID and controller along with a slice of Nodes. It also has a slice of allowed ACLOperations.

+ func (*AdminClient) DescribeConfigs
+
+ func (a *AdminClient) DescribeConfigs(ctx context.Context, resources []ConfigResource, options ...DescribeConfigsAdminOption) (result []ConfigResourceResult, err error)
+
+ DescribeConfigs retrieves configuration for cluster resources.
+
+ The returned configuration includes default values; use ConfigEntryResult.IsDefault or ConfigEntryResult.Source to distinguish default values from manually configured settings.
+
+ The value of config entries where .IsSensitive is true will always be nil to avoid disclosing sensitive information, such as security settings.
+
+ Configuration entries where .IsReadOnly is true can't be modified (with AlterConfigs).
+
+ Synonym configuration entries are returned if the broker supports it (broker version >= 1.1.0). See .Synonyms.
+
+ Requires broker version >=0.11.0.0
+
+ Multiple resources and resource types may be requested, but at most one resource of type ResourceBroker is allowed per call since these resource requests must be sent to the broker specified in the resource.

+ func (*AdminClient) DescribeConsumerGroups
+
+ func (a *AdminClient) DescribeConsumerGroups(
+    ctx context.Context, groups []string,
+    options ...DescribeConsumerGroupsAdminOption) (result DescribeConsumerGroupsResult, err error)
+
+ DescribeConsumerGroups describes groups from the cluster as specified by the groups list.
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `groups` - Slice of groups to describe. This should not be nil/empty.
+ * `options` - DescribeConsumerGroupsAdminOption options.
+
+ Returns DescribeConsumerGroupsResult, which contains a slice of ConsumerGroupDescriptions corresponding to the input groups, plus an error that is not `nil` for client level errors. Individual ConsumerGroupDescriptions inside the slice should also be checked for errors.

+ func (*AdminClient) DescribeTopics
+
+ func (a *AdminClient) DescribeTopics(
+    ctx context.Context, topics TopicCollection,
+    options ...DescribeTopicsAdminOption) (result DescribeTopicsResult, err error)
+
+ DescribeTopics describes topics from the cluster as specified by the topics list.
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `topics` - Collection of topics to describe. This should not have nil topic names.
+ * `options` - DescribeTopicsAdminOption options.
+
+ Returns DescribeTopicsResult, which contains a slice of TopicDescriptions corresponding to the input topics, plus an error that is not `nil` for client level errors. Individual TopicDescriptions inside the slice should also be checked for errors. Individual TopicDescriptions also have a slice of allowed ACLOperations.

+ func (*AdminClient) DescribeUserScramCredentials
+
+ func (a *AdminClient) DescribeUserScramCredentials(
+    ctx context.Context, users []string,
+    options ...DescribeUserScramCredentialsAdminOption) (result DescribeUserScramCredentialsResult, err error)
+
+ DescribeUserScramCredentials describes SASL/SCRAM credentials for the specified user names.
+
+ Parameters:
+
+ * `ctx` - context with the maximum amount of time to block, or nil for indefinite.
+ * `users` - a slice of strings, each one corresponding to a user name; no duplicates are allowed.
+ * `options` - DescribeUserScramCredentialsAdminOption options.
+
+ Returns a map from user name to user SCRAM credentials description. Each description can have an individual error.

func (*AdminClient) ElectLeaders

func (a *AdminClient) ElectLeaders(ctx context.Context, electLeaderRequest ElectLeadersRequest, options ...ElectLeadersAdminOption) (result ElectLeadersResult, err error)

ElectLeaders performs Preferred or Unclean elections for the specified topic partitions, or for all of them.

Parameters:

  • `ctx` - context with the maximum amount of time to block, or nil for indefinite.
  • `electLeaderRequest` - ElectLeadersRequest containing the election type and the partitions to elect leaders for, or nil for election in all the partitions.
  • `options` - ElectLeadersAdminOption options.

Returns ElectLeadersResult, which contains a slice of TopicPartitions for which the leader election was performed. If partitions are passed as nil, the broker performs leader elections for all partitions, but the results will only contain partitions for which there was an election or that resulted in an error. Individual TopicPartitions inside the ElectLeadersResult should be checked for errors. Additionally, an error that is not nil for client-level errors is returned.
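As a sketch (topic and partition hypothetical; setup as in the earlier sketches), a preferred leader election for a single partition might look like:

    topic := "metrics" // hypothetical topic
    parts := []kafka.TopicPartition{{Topic: &topic, Partition: 0}}
    req := kafka.NewElectLeadersRequest(kafka.ElectionTypePreferred, parts)
    res, err := a.ElectLeaders(ctx, req)
    if err != nil {
        log.Fatal(err) // client-level error
    }
    for _, tp := range res.TopicPartitions {
        // Per-partition errors must be checked individually.
        if tp.Error != nil {
            fmt.Printf("%v: %v\n", tp, tp.Error)
        }
    }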

func (*AdminClient) GetMetadata

func (a *AdminClient) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)

GetMetadata queries the broker for cluster and topic metadata. If topic is non-nil, only information about that topic is returned; else if allTopics is false, only information about locally used topics is returned; else information about all topics is returned. GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.
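For instance (topic name hypothetical; Metadata's Topics map and partition slice assumed per the Metadata/TopicMetadata types elsewhere in this package), fetching metadata for a single topic and printing its partition count:

    topic := "metrics"
    md, err := a.GetMetadata(&topic, false, 5000)
    if err != nil {
        log.Fatal(err)
    }
    if t, ok := md.Topics[topic]; ok {
        fmt.Printf("topic %s has %d partition(s)\n", t.Topic, len(t.Partitions))
    }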

func (*AdminClient) IncrementalAlterConfigs

func (a *AdminClient) IncrementalAlterConfigs(ctx context.Context, resources []ConfigResource, options ...AlterConfigsAdminOption) (result []ConfigResourceResult, err error)

IncrementalAlterConfigs alters/updates cluster resource configuration.

Updates are not transactional, so they may succeed for some resources while failing for others. The configs for a particular resource are updated atomically, executing the corresponding incremental operations on the provided configurations.

Requires broker version >= 2.3.0.

IncrementalAlterConfigs will only change configurations for provided resources with the new configuration given.

Multiple resources and resource types may be set, but at most one resource of type ResourceBroker is allowed per call since these resource requests must be sent to the broker specified in the resource.
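A sketch (topic and value hypothetical; setup as above) that incrementally sets one topic property without touching the others, using the AlterConfigOpType operations documented later in this section:

    res, err := a.IncrementalAlterConfigs(ctx, []kafka.ConfigResource{{
        Type: kafka.ResourceTopic,
        Name: "metrics",
        Config: []kafka.ConfigEntry{{
            Name:                 "retention.ms",
            Value:                "86400000", // hypothetical: 1 day
            IncrementalOperation: kafka.AlterConfigOpTypeSet,
        }},
    }})
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range res {
        // Updates are not transactional; check each resource's error.
        if r.Error.Code() != kafka.ErrNoError {
            fmt.Printf("%s: %v\n", r.Name, r.Error)
        }
    }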

func (*AdminClient) IsClosed

func (a *AdminClient) IsClosed() bool

IsClosed returns a boolean representing whether the client is closed or not.

func (*AdminClient) ListConsumerGroupOffsets

func (a *AdminClient) ListConsumerGroupOffsets(
    ctx context.Context, groupsPartitions []ConsumerGroupTopicPartitions,
    options ...ListConsumerGroupOffsetsAdminOption) (lcgor ListConsumerGroupOffsetsResult, err error)

ListConsumerGroupOffsets fetches the offsets for topic partition(s) for consumer group(s).

Parameters:

  • `ctx` - context with the maximum amount of time to block, or nil for indefinite.
  • `groupsPartitions` - a slice of ConsumerGroupTopicPartitions, each element of which has the id of a consumer group and a slice of the TopicPartitions to fetch the offsets for. The slice of TopicPartitions can be nil, to fetch all topic partitions for that group. Currently, the size of `groupsPartitions` has to be exactly one.
  • `options` - ListConsumerGroupOffsetsAdminOption options.

Returns a ListConsumerGroupOffsetsResult, containing a slice of ConsumerGroupTopicPartitions corresponding to the input slice, plus an error that is not `nil` for client-level errors. Individual TopicPartitions inside each of the ConsumerGroupTopicPartitions should also be checked for errors.
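A sketch (group name hypothetical; the result's ConsumerGroupsTopicPartitions field assumed to mirror AlterConsumerGroupOffsetsResult below) fetching all committed offsets for one group — note the current restriction to exactly one element:

    res, err := a.ListConsumerGroupOffsets(ctx,
        []kafka.ConsumerGroupTopicPartitions{{Group: "example-group", Partitions: nil}})
    if err != nil {
        log.Fatal(err)
    }
    for _, g := range res.ConsumerGroupsTopicPartitions {
        for _, tp := range g.Partitions {
            fmt.Printf("%s [%d] committed offset %v\n", *tp.Topic, tp.Partition, tp.Offset)
        }
    }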

func (*AdminClient) ListConsumerGroups

func (a *AdminClient) ListConsumerGroups(
    ctx context.Context,
    options ...ListConsumerGroupsAdminOption) (result ListConsumerGroupsResult, err error)

ListConsumerGroups lists the consumer groups available in the cluster.

Parameters:

  • `ctx` - context with the maximum amount of time to block, or nil for indefinite.
  • `options` - ListConsumerGroupsAdminOption options.

Returns a ListConsumerGroupsResult, which contains a slice corresponding to each group in the cluster and a slice of errors encountered while listing. Additionally, an error that is not nil for client-level errors is returned. Both the returned error and the errors slice should be checked.

func (*AdminClient) ListOffsets

func (a *AdminClient) ListOffsets(
    ctx context.Context, topicPartitionOffsets map[TopicPartition]OffsetSpec,
    options ...ListOffsetsAdminOption) (result ListOffsetsResult, err error)

ListOffsets describes offsets for the specified TopicPartitions based on an OffsetSpec.

Parameters:

  • `ctx` - context with the maximum amount of time to block, or nil for indefinite.
  • `topicPartitionOffsets` - a map from TopicPartition to OffsetSpec; it holds either the OffsetSpec enum value or a timestamp. Must not be nil.
  • `options` - ListOffsetsAdminOption options.

Returns a ListOffsetsResult. Each TopicPartition's ListOffset can have an individual error.
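A sketch of querying the latest offset of one partition — the LatestOffsetSpec constant and the result's ResultInfos map are assumptions about the package's OffsetSpec/ListOffsetsResult types, not documented in this excerpt:

    topic := "metrics" // hypothetical
    res, err := a.ListOffsets(ctx, map[kafka.TopicPartition]kafka.OffsetSpec{
        {Topic: &topic, Partition: 0}: kafka.LatestOffsetSpec,
    })
    if err != nil {
        log.Fatal(err)
    }
    for tp, info := range res.ResultInfos {
        // Each partition's result can carry its own error.
        if info.Error.Code() != kafka.ErrNoError {
            fmt.Printf("%v: %v\n", tp, info.Error)
            continue
        }
        fmt.Printf("%s [%d]: latest offset %v\n", *tp.Topic, tp.Partition, info.Offset)
    }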

func (*AdminClient) SetOAuthBearerToken

func (a *AdminClient) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

SetOAuthBearerToken sets the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil on success, otherwise an error if: 1) the token data is invalid (meaning an expiration time in the past, or either a token value or an extension key or value that does not meet the regular expression requirements as per https://tools.ietf.org/html/rfc7628#section-3.1); 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 3) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism.

func (*AdminClient) SetOAuthBearerTokenFailure

func (a *AdminClient) SetOAuthBearerTokenFailure(errstr string) error

SetOAuthBearerTokenFailure sets the error message describing why token retrieval/setting failed; it also schedules a new token refresh event for 10 seconds later so the attempt may be retried. It will return nil on success, otherwise an error if: 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 2) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism.

func (*AdminClient) SetSaslCredentials

func (a *AdminClient) SetSaslCredentials(username, password string) error

SetSaslCredentials sets the SASL credentials used for this admin client. The new credentials will overwrite the old ones (which were set when creating the admin client or by a previous call to SetSaslCredentials). The new credentials will be used the next time the admin client needs to authenticate to a broker. This method will not disconnect existing broker connections that were established with the old credentials. This method applies only to the SASL PLAIN and SCRAM mechanisms.

func (*AdminClient) String

func (a *AdminClient) String() string

String returns a human readable name for an AdminClient instance.

type AdminOption

AdminOption is a generic type not to be used directly.

See CreateTopicsAdminOption et al.

type AdminOption interface {
    // contains filtered or unexported methods
}

type AdminOptionIncludeAuthorizedOperations

AdminOptionIncludeAuthorizedOperations decides if the broker should return authorized operations.

Default: false.

Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster.

type AdminOptionIncludeAuthorizedOperations struct {
    // contains filtered or unexported fields
}

func SetAdminOptionIncludeAuthorizedOperations

func SetAdminOptionIncludeAuthorizedOperations(val bool) (ao AdminOptionIncludeAuthorizedOperations)

SetAdminOptionIncludeAuthorizedOperations decides if the broker should return authorized operations.

Default: false.

Valid for DescribeConsumerGroups, DescribeTopics, DescribeCluster.

type AdminOptionIsolationLevel

AdminOptionIsolationLevel sets the overall request IsolationLevel.

Default: `ReadUncommitted`.

Valid for ListOffsets.

type AdminOptionIsolationLevel struct {
    // contains filtered or unexported fields
}

func SetAdminIsolationLevel

func SetAdminIsolationLevel(isolationLevel IsolationLevel) (ao AdminOptionIsolationLevel)

SetAdminIsolationLevel sets the overall IsolationLevel for a request.

Default: `ReadUncommitted`.

Valid for ListOffsets.

type AdminOptionMatchConsumerGroupStates

AdminOptionMatchConsumerGroupStates decides which state(s) groups should be listed in.

Default: nil (lists groups in all states).

Valid for ListConsumerGroups.

type AdminOptionMatchConsumerGroupStates struct {
    // contains filtered or unexported fields
}

func SetAdminMatchConsumerGroupStates

func SetAdminMatchConsumerGroupStates(val []ConsumerGroupState) (ao AdminOptionMatchConsumerGroupStates)

SetAdminMatchConsumerGroupStates sets the state(s) that must be listed.

Default: nil (lists groups in all states).

Valid for ListConsumerGroups.

type AdminOptionMatchConsumerGroupTypes

AdminOptionMatchConsumerGroupTypes decides the type(s) that must be listed.

Default: nil (lists groups of all types).

Valid for ListConsumerGroups.

type AdminOptionMatchConsumerGroupTypes struct {
    // contains filtered or unexported fields
}

func SetAdminMatchConsumerGroupTypes

func SetAdminMatchConsumerGroupTypes(val []ConsumerGroupType) (ao AdminOptionMatchConsumerGroupTypes)

SetAdminMatchConsumerGroupTypes sets the type(s) that must be listed.

Default: nil (lists groups of all types).

Valid for ListConsumerGroups.

type AdminOptionOperationTimeout

AdminOptionOperationTimeout sets the broker's operation timeout, such as the timeout for CreateTopics to complete the creation of topics on the controller before returning a result to the application.

CreateTopics, DeleteTopics, CreatePartitions: a value of 0 will return immediately after triggering topic creation, while > 0 will wait this long for topic creation to propagate in the cluster.

Default: 0 (return immediately).

Valid for CreateTopics, DeleteTopics, CreatePartitions.

type AdminOptionOperationTimeout struct {
    // contains filtered or unexported fields
}

func SetAdminOperationTimeout

func SetAdminOperationTimeout(t time.Duration) (ao AdminOptionOperationTimeout)

SetAdminOperationTimeout sets the broker's operation timeout, such as the timeout for CreateTopics to complete the creation of topics on the controller before returning a result to the application.

CreateTopics, DeleteTopics, CreatePartitions: a value of 0 will return immediately after triggering topic creation, while > 0 will wait this long for topic creation to propagate in the cluster.

Default: 0 (return immediately).

Valid for CreateTopics, DeleteTopics, CreatePartitions.

type AdminOptionRequestTimeout

AdminOptionRequestTimeout sets the overall request timeout, including broker lookup, request transmission, operation time on broker, and response.

Default: `socket.timeout.ms`.

Valid for all Admin API methods.

type AdminOptionRequestTimeout struct {
    // contains filtered or unexported fields
}

func SetAdminRequestTimeout

func SetAdminRequestTimeout(t time.Duration) (ao AdminOptionRequestTimeout)

SetAdminRequestTimeout sets the overall request timeout, including broker lookup, request transmission, operation time on broker, and response.

Default: `socket.timeout.ms`.

Valid for all Admin API methods.
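These option setters return plain values that are passed variadically to the Admin API calls. A sketch (client setup as in the earlier sketches) combining this request timeout with authorized-operations retrieval on DescribeCluster:

    res, err := a.DescribeCluster(ctx,
        kafka.SetAdminRequestTimeout(30*time.Second),
        kafka.SetAdminOptionIncludeAuthorizedOperations(true))
    if err != nil {
        log.Fatal(err)
    }
    // ClusterID may be nil on old brokers, per DescribeClusterResult below.
    if res.ClusterID != nil {
        fmt.Printf("cluster %s: %d node(s)\n", *res.ClusterID, len(res.Nodes))
    }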

type AdminOptionRequireStableOffsets

AdminOptionRequireStableOffsets decides if the broker should return stable offsets (transaction-committed).

Default: false.

Valid for ListConsumerGroupOffsets.

type AdminOptionRequireStableOffsets struct {
    // contains filtered or unexported fields
}

func SetAdminRequireStableOffsets

func SetAdminRequireStableOffsets(val bool) (ao AdminOptionRequireStableOffsets)

SetAdminRequireStableOffsets decides if the broker should return stable offsets (transaction-committed).

Default: false.

Valid for ListConsumerGroupOffsets.

type AdminOptionValidateOnly

AdminOptionValidateOnly tells the broker to only validate the request, without performing the requested operation (create topics, etc).

Default: false.

Valid for CreateTopics, CreatePartitions, AlterConfigs.

type AdminOptionValidateOnly struct {
    // contains filtered or unexported fields
}

func SetAdminValidateOnly

func SetAdminValidateOnly(validateOnly bool) (ao AdminOptionValidateOnly)

SetAdminValidateOnly tells the broker to only validate the request, without performing the requested operation (create topics, etc).

Default: false.

Valid for CreateTopics, CreatePartitions, AlterConfigs.

type AlterConfigOpType

AlterConfigOpType specifies the operation to perform on the ConfigEntry for IncrementalAlterConfigs.

type AlterConfigOpType int

const (
    // AlterConfigOpTypeSet sets/overwrites the configuration
    // setting.
    AlterConfigOpTypeSet AlterConfigOpType = C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET
    // AlterConfigOpTypeDelete sets the configuration setting
    // to default or NULL.
    AlterConfigOpTypeDelete AlterConfigOpType = C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE
    // AlterConfigOpTypeAppend appends the value to existing
    // configuration settings.
    AlterConfigOpTypeAppend AlterConfigOpType = C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND
    // AlterConfigOpTypeSubtract subtracts the value from
    // existing configuration settings.
    AlterConfigOpTypeSubtract AlterConfigOpType = C.RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT
)

func (AlterConfigOpType) String

func (o AlterConfigOpType) String() string

String returns the human-readable representation of an AlterConfigOpType.

type AlterConfigsAdminOption

AlterConfigsAdminOption - see setters.

See SetAdminRequestTimeout, SetAdminValidateOnly, SetAdminIncremental.

type AlterConfigsAdminOption interface {
    // contains filtered or unexported methods
}

type AlterConsumerGroupOffsetsAdminOption

AlterConsumerGroupOffsetsAdminOption - see setter.

See SetAdminRequestTimeout.

type AlterConsumerGroupOffsetsAdminOption interface {
    // contains filtered or unexported methods
}

type AlterConsumerGroupOffsetsResult

AlterConsumerGroupOffsetsResult represents the result of an AlterConsumerGroupOffsets operation.

type AlterConsumerGroupOffsetsResult struct {
    // A slice of ConsumerGroupTopicPartitions, each element represents a group's
    // TopicPartitions and Offsets.
    ConsumerGroupsTopicPartitions []ConsumerGroupTopicPartitions
}

type AlterOperation

AlterOperation specifies the operation to perform on the ConfigEntry. Currently only AlterOperationSet.

type AlterOperation int

func (AlterOperation) String

func (o AlterOperation) String() string

String returns the human-readable representation of an AlterOperation.

type AlterUserScramCredentialsAdminOption

AlterUserScramCredentialsAdminOption - see setter.

See SetAdminRequestTimeout.

type AlterUserScramCredentialsAdminOption interface {
    // contains filtered or unexported methods
}

type AlterUserScramCredentialsResult

AlterUserScramCredentialsResult represents the result of an AlterUserScramCredentials call.

type AlterUserScramCredentialsResult struct {
    // Errors - Map from user name
    // to an Error, with ErrNoError code on success.
    Errors map[string]Error
}

type AssignedPartitions

AssignedPartitions consumer group rebalance event: assigned partition set.

type AssignedPartitions struct {
    Partitions []TopicPartition
}

func (AssignedPartitions) String

func (e AssignedPartitions) String() string

type BrokerMetadata

BrokerMetadata contains per-broker metadata.

type BrokerMetadata struct {
    ID   int32
    Host string
    Port int
}

type ConfigEntry

ConfigEntry holds parameters for altering a resource's configuration.

type ConfigEntry struct {
    // Name of configuration entry, e.g., topic configuration property name.
    Name string
    // Value of configuration entry.
    Value string
    // Deprecated: Operation to perform on the entry.
    Operation AlterOperation
    // Operation to perform on the entry incrementally.
    IncrementalOperation AlterConfigOpType
}

func StringMapToConfigEntries

func StringMapToConfigEntries(stringMap map[string]string, operation AlterOperation) []ConfigEntry

StringMapToConfigEntries creates a new slice of ConfigEntry objects from the provided string map. The AlterOperation is set on each created entry.

func StringMapToIncrementalConfigEntries

func StringMapToIncrementalConfigEntries(stringMap map[string]string,
    operationMap map[string]AlterConfigOpType) []ConfigEntry

StringMapToIncrementalConfigEntries creates a new slice of ConfigEntry objects from the provided string map and an operation map. The AlterConfigOpType is set on each created entry.

func (ConfigEntry) String

func (c ConfigEntry) String() string

String returns a human-readable representation of a ConfigEntry.

type ConfigEntryResult

ConfigEntryResult contains the result of a single configuration entry from a DescribeConfigs request.

type ConfigEntryResult struct {
    // Name of configuration entry, e.g., topic configuration property name.
    Name string
    // Value of configuration entry.
    Value string
    // Source indicates the configuration source.
    Source ConfigSource
    // IsReadOnly indicates whether the configuration entry can be altered.
    IsReadOnly bool
    // IsDefault indicates whether the value is at its default.
    IsDefault bool
    // IsSensitive indicates whether the configuration entry contains sensitive information, in which case the value will be unset.
    IsSensitive bool
    // IsSynonym indicates whether the configuration entry is a synonym for another configuration property.
    IsSynonym bool
    // Synonyms contains a map of configuration entries that are synonyms to this configuration entry.
    Synonyms map[string]ConfigEntryResult
}

func (ConfigEntryResult) String

func (c ConfigEntryResult) String() string

String returns a human-readable representation of a ConfigEntryResult.

type ConfigMap

ConfigMap is a map containing standard librdkafka configuration properties as documented in: https://github.com/confluentinc/librdkafka/tree/master/CONFIGURATION.md

The special property "default.topic.config" (optional) is a ConfigMap containing default topic configuration properties.

The use of "default.topic.config" is deprecated; topic configuration properties shall be specified in the standard ConfigMap. For backwards compatibility, "default.topic.config" (if supplied) takes precedence.

type ConfigMap map[string]ConfigValue

func (ConfigMap) Get

func (m ConfigMap) Get(key string, defval ConfigValue) (ConfigValue, error)

Get finds the given key in the ConfigMap and returns its value. If the key is not found, `defval` is returned. If the key is found but the type does not match that of `defval` (unless nil), an ErrInvalidArg error is returned.

func (ConfigMap) Set

func (m ConfigMap) Set(kv string) error

Set implements flag.Set (command line argument parser) as a convenience for `-X key=value` config.

func (ConfigMap) SetKey

func (m ConfigMap) SetKey(key string, value ConfigValue) error

SetKey sets configuration property key to value.

For user convenience a key prefixed with {topic}. will be set on the "default.topic.config" sub-map; this use is deprecated.
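A small sketch of the three accessors (all values hypothetical):

    cm := kafka.ConfigMap{}
    _ = cm.SetKey("bootstrap.servers", "localhost:9092") // typed set
    _ = cm.Set("group.id=example")                       // "-X key=value" style
    v, err := cm.Get("group.id", "default-group")        // typed get with default
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(v) // prints: example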

type ConfigResource

ConfigResource holds parameters for altering an Apache Kafka configuration resource.

type ConfigResource struct {
    // Type of resource to set.
    Type ResourceType
    // Name of resource to set.
    Name string
    // Config entries to set.
    // Configuration updates are atomic; any configuration property not provided
    // here will be reverted (by the broker) to its default value.
    // Use DescribeConfigs to retrieve the list of current configuration entry values.
    Config []ConfigEntry
}

func (ConfigResource) String

func (c ConfigResource) String() string

String returns a human-readable representation of a ConfigResource.

type ConfigResourceResult

ConfigResourceResult provides the result for a resource from an AlterConfigs or DescribeConfigs request.

type ConfigResourceResult struct {
    // Type of returned result resource.
    Type ResourceType
    // Name of returned result resource.
    Name string
    // Error, if any, of returned result resource.
    Error Error
    // Config entries, if any, of returned result resource.
    Config map[string]ConfigEntryResult
}

func (ConfigResourceResult) String

func (c ConfigResourceResult) String() string

String returns a human-readable representation of a ConfigResourceResult.

type ConfigSource

ConfigSource represents an Apache Kafka config source.

type ConfigSource int

const (
    // ConfigSourceUnknown is the default value
    ConfigSourceUnknown ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG
    // ConfigSourceDynamicTopic is dynamic topic config that is configured for a specific topic
    ConfigSourceDynamicTopic ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG
    // ConfigSourceDynamicBroker is dynamic broker config that is configured for a specific broker
    ConfigSourceDynamicBroker ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG
    // ConfigSourceDynamicDefaultBroker is dynamic broker config that is configured as default for all brokers in the cluster
    ConfigSourceDynamicDefaultBroker ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG
    // ConfigSourceStaticBroker is static broker config provided as broker properties at startup (e.g. from server.properties file)
    ConfigSourceStaticBroker ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG
    // ConfigSourceDefault is built-in default configuration for configs that have a default value
    ConfigSourceDefault ConfigSource = C.RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG
)

func (ConfigSource) String

func (t ConfigSource) String() string

String returns the human-readable representation of a ConfigSource type.

type ConfigValue

ConfigValue supports the following types:

    bool, int, string, any type with the standard String() interface

type ConfigValue interface{}

type Consumer

Consumer implements a High-level Apache Kafka Consumer instance.

type Consumer struct {
    // contains filtered or unexported fields
}

func NewConsumer

func NewConsumer(conf *ConfigMap) (*Consumer, error)

NewConsumer creates a new high-level Consumer instance.

conf is a *ConfigMap with standard librdkafka configuration properties.

Supported special configuration properties:

go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel.
                                     If set to true the app must handle the AssignedPartitions and
                                     RevokedPartitions events and call Assign() and Unassign()
                                     respectively.
go.events.channel.enable (bool, false) - [deprecated] Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled.
go.events.channel.size (int, 1000) - Events() channel size
go.logs.channel.enable (bool, false) - Forward log to Logs() channel.
go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.

WARNING: Due to the buffering nature of channels (and queues in general) the use of the events channel risks receiving outdated events and messages. Minimizing go.events.channel.size reduces the risk and number of outdated events and messages but does not eliminate the factor completely. With a channel size of 1 at most one event or message may be outdated.
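A minimal construction sketch (broker, group id, and topic are hypothetical; imports as in the earlier admin sketch). All other keys beyond the special go.* properties are standard librdkafka configuration:

    c, err := kafka.NewConsumer(&kafka.ConfigMap{
        "bootstrap.servers": "localhost:9092",
        "group.id":          "example-group",
        "auto.offset.reset": "earliest",
    })
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()

    // Subscribe to a hypothetical topic; nil means no rebalance callback.
    if err := c.SubscribeTopics([]string{"metrics"}, nil); err != nil {
        log.Fatal(err)
    }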

func (*Consumer) Assign

func (c *Consumer) Assign(partitions []TopicPartition) (err error)

Assign an atomic set of partitions to consume.

The .Offset field of each TopicPartition must either be set to an absolute starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc), but should typically be set to `kafka.OffsetStored` to have the consumer use the committed offset as a start position, with a fallback to `auto.offset.reset` if there is no committed offset.

This replaces the current assignment.

func (*Consumer) Assignment

func (c *Consumer) Assignment() (partitions []TopicPartition, err error)

Assignment returns the current partition assignments.

func (*Consumer) AssignmentLost

func (c *Consumer) AssignmentLost() bool

AssignmentLost returns true if the current partition assignment has been lost. This method is only applicable for use with a subscribing consumer when handling a rebalance event or callback. Partitions that have been lost may already be owned by other members in the group and therefore committing offsets, for example, may fail.

func (*Consumer) Close

func (c *Consumer) Close() (err error)

Close Consumer instance. The object is no longer usable after this call.

func (*Consumer) Commit

func (c *Consumer) Commit() ([]TopicPartition, error)

Commit offsets for currently assigned partitions. This is a blocking call. Returns the committed offsets on success.

func (*Consumer) CommitMessage

func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error)

CommitMessage commits offset based on the provided message. This is a blocking call. Returns the committed offsets on success.

func (*Consumer) CommitOffsets

func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error)

CommitOffsets commits the provided list of offsets. This is a blocking call. Returns the committed offsets on success.

func (*Consumer) Committed

func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)

Committed retrieves committed offsets for the given set of partitions.

func (*Consumer) Events

func (c *Consumer) Events() chan Event

Events returns the Events channel (if enabled).

Deprecated: Events (channel based consumer) is deprecated in favour of Poll().

func (*Consumer) GetConsumerGroupMetadata

func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error)

GetConsumerGroupMetadata returns the consumer's current group metadata. This object should be passed to the transactional producer's SendOffsetsToTransaction() API.

func (*Consumer) GetMetadata

func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)

GetMetadata queries the broker for cluster and topic metadata. If topic is non-nil, only information about that topic is returned; else if allTopics is false, only information about locally used topics is returned; else information about all topics is returned. GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.

func (*Consumer) GetRebalanceProtocol

func (c *Consumer) GetRebalanceProtocol() string

GetRebalanceProtocol returns the current consumer group rebalance protocol, which is either "EAGER" or "COOPERATIVE". If the rebalance protocol is not known in the current state, an empty string is returned. Should typically only be called during rebalancing.

func (*Consumer) GetWatermarkOffsets

func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error)

GetWatermarkOffsets returns the cached low and high offsets for the given topic and partition. The high offset is populated on every fetch response or via calling QueryWatermarkOffsets. The low offset is populated every statistics.interval.ms if that value is set. OffsetInvalid will be returned if there is no cached offset for either value.

func (*Consumer) IncrementalAssign

func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error)

IncrementalAssign adds the specified partitions to the current set of partitions to consume.

The .Offset field of each TopicPartition must either be set to an absolute starting offset (>= 0), or one of the logical offsets (`kafka.OffsetEnd` etc), but should typically be set to `kafka.OffsetStored` to have the consumer use the committed offset as a start position, with a fallback to `auto.offset.reset` if there is no committed offset.

The new partitions must not be part of the current assignment.

func (*Consumer) IncrementalUnassign

func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error)

IncrementalUnassign removes the specified partitions from the current set of partitions to consume.

The .Offset field of the TopicPartition is ignored.

The removed partitions must be part of the current assignment.

func (*Consumer) IsClosed

func (c *Consumer) IsClosed() bool

IsClosed returns a boolean representing whether the client is closed or not.

func (*Consumer) Logs

func (c *Consumer) Logs() chan LogEvent

Logs returns the log channel if enabled, or nil otherwise.

func (*Consumer) OffsetsForTimes

func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)

OffsetsForTimes looks up offsets by timestamp for the given partitions.

The returned offset for each partition is the earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition. If the provided timestamp exceeds that of the last message in the partition, a value of -1 will be returned.

The timestamps to query are represented as `.Offset` in the `times` argument and the looked up offsets are represented as `.Offset` in the returned `offsets` list.

The function will block for at most timeoutMs milliseconds.

Duplicate Topic+Partitions are not supported. Per-partition errors may be returned in the `.Error` field.
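A sketch (topic hypothetical; consumer `c` as in the NewConsumer sketch) that finds the first offset at or after a wall-clock time, encoding the millisecond timestamp in .Offset as described above:

    topic := "metrics"
    ts := time.Now().Add(-1 * time.Hour).UnixMilli()
    times := []kafka.TopicPartition{{Topic: &topic, Partition: 0, Offset: kafka.Offset(ts)}}
    offsets, err := c.OffsetsForTimes(times, 5000)
    if err != nil {
        log.Fatal(err)
    }
    // .Offset is -1 if the timestamp is past the partition's last message.
    fmt.Printf("first offset at/after t: %v\n", offsets[0].Offset)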

func (*Consumer) Pause

func (c *Consumer) Pause(partitions []TopicPartition) (err error)

Pause consumption for the provided list of partitions.

Note that messages already enqueued on the consumer's Event channel (if `go.events.channel.enable` has been set) will NOT be purged by this call; set `go.events.channel.size` accordingly.

func (*Consumer) Poll

func (c *Consumer) Poll(timeoutMs int) (event Event)

Poll the consumer for messages or events.

Will block for at most timeoutMs milliseconds.

The following callbacks may be triggered:

    Subscribe()'s rebalanceCb

Returns nil on timeout, else an Event.
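A typical poll-loop sketch, dispatching on the event type (consumer `c` as in the NewConsumer sketch; additionally imports "os"):

    for {
        ev := c.Poll(100) // block up to 100 ms
        switch e := ev.(type) {
        case *kafka.Message:
            fmt.Printf("%s: %s\n", e.TopicPartition, string(e.Value))
        case kafka.Error:
            fmt.Fprintf(os.Stderr, "error: %v\n", e)
        case nil:
            // Poll timed out; nothing to do.
        }
    }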

func (*Consumer) Position

func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error)

Position returns the current consume position for the given partitions. Typical use is to call Assignment() to get the partition list and then pass it to Position() to get the current consume position for each of the assigned partitions. The consume position is the next message to read from the partition, i.e., the offset of the last message seen by the application + 1.

func (*Consumer) QueryWatermarkOffsets

func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)

QueryWatermarkOffsets queries the broker for the low and high offsets for the given topic and partition.

func (*Consumer) ReadMessage

func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error)

ReadMessage polls the consumer for a message.

This is a convenience API that wraps Poll() and only returns messages or errors. All other event types are discarded.

The call will block for at most `timeout` waiting for a new message or error. `timeout` may be set to -1 for indefinite wait.

Timeout is returned as (nil, err) where `err.(kafka.Error).IsTimeout() == true`.

Messages are returned as (msg, nil), while general errors are returned as (nil, err), and partition-specific errors are returned as (msg, err) where msg.TopicPartition provides partition-specific information (such as topic, partition and offset).

All other event types, such as PartitionEOF, AssignedPartitions, etc, are silently discarded.

func (*Consumer) Resume

func (c *Consumer) Resume(partitions []TopicPartition) (err error)

Resume consumption for the provided list of partitions.

func (*Consumer) Seek

func (c *Consumer) Seek(partition TopicPartition, ignoredTimeoutMs int) error

Seek seeks the given topic partition using the offset from the TopicPartition.

The ignoredTimeoutMs parameter is ignored. Instead, this method blocks until the fetcher state is updated for the given partition with the new offset. This guarantees that no previously fetched messages for the old offset (or fetch position) will be passed to the application once this call returns. It will still take some time after the method returns until messages are fetched at the new offset.

Seek() may only be used for partitions already being consumed (through Assign() or implicitly through a self-rebalanced Subscribe()). To set the starting offset it is preferred to use Assign() and provide a starting offset for each partition.

Returns an error on failure or nil otherwise.

Deprecated: Seek is deprecated in favour of SeekPartitions().

func (*Consumer) SeekPartitions

func (c *Consumer) SeekPartitions(partitions []TopicPartition) ([]TopicPartition, error)

SeekPartitions seeks the given topic partitions to the per-partition offset stored in the .Offset field of each partition.

The offset may be either absolute (>= 0) or a logical offset (e.g. OffsetEnd).

SeekPartitions() may only be used for partitions already being consumed (through Assign() or implicitly through a self-rebalanced Subscribe()). To set the starting offset it is preferred to use Assign() in a kafka.AssignedPartitions handler and provide a starting offset for each partition.

Returns an error on failure or nil otherwise. Individual partition errors should be checked in the per-partition .Error field.

func (*Consumer) SetOAuthBearerToken

func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

SetOAuthBearerToken sets the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil on success, otherwise an error if: 1) the token data is invalid (meaning an expiration time in the past, or either a token value or an extension key or value that does not meet the regular expression requirements as per https://tools.ietf.org/html/rfc7628#section-3.1); 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 3) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism.

func (*Consumer) SetOAuthBearerTokenFailure

func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error

SetOAuthBearerTokenFailure sets the error message describing why token retrieval/setting failed; it also schedules a new token refresh event for 10 seconds later so the attempt may be retried. It will return nil on success, otherwise an error if: 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 2) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism.

func (*Consumer) SetSaslCredentials

func (c *Consumer) SetSaslCredentials(username, password string) error

SetSaslCredentials sets the SASL credentials used for this consumer. The new credentials will overwrite the old ones (which were set when creating the consumer or by a previous call to SetSaslCredentials). The new credentials will be used the next time the consumer needs to authenticate to a broker. This method will not disconnect existing broker connections that were established with the old credentials. This method applies only to the SASL PLAIN and SCRAM mechanisms.

func (*Consumer) StoreMessage

func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error)

StoreMessage stores offset based on the provided message. This is a convenience method that uses StoreOffsets to do the actual work.

func (*Consumer) StoreOffsets

func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error)

StoreOffsets stores the provided list of offsets that will be committed to the offset store according to `auto.commit.interval.ms` or manual offset-less Commit().

Returns the stored offsets on success. If at least one offset couldn't be stored, an error and a list of offsets is returned. Each offset can be checked for specific errors via its `.Error` member.
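For at-least-once processing, a common sketch (assuming `"enable.auto.offset.store": false` in the ConfigMap, so auto-commit only commits explicitly stored offsets; `process` is a hypothetical application handler):

    msg, err := c.ReadMessage(5 * time.Second)
    if err == nil {
        process(msg) // hypothetical: handle the message before storing its offset
        if _, err := c.StoreMessage(msg); err != nil {
            log.Printf("offset store failed: %v", err)
        }
    } else if kerr, ok := err.(kafka.Error); !ok || !kerr.IsTimeout() {
        log.Printf("read failed: %v", err)
    }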

func (*Consumer) String

func (c *Consumer) String() string

String returns a human readable name for a Consumer instance.

func (*Consumer) Subscribe

func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error

Subscribe to a single topic. This replaces the current subscription.

func (*Consumer) SubscribeTopics

func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error)

SubscribeTopics subscribes to the provided list of topics. This replaces the current subscription.

func (*Consumer) Subscription

func (c *Consumer) Subscription() (topics []string, err error)

Subscription returns the current subscription as set by Subscribe().

func (*Consumer) Unassign

func (c *Consumer) Unassign() (err error)

Unassign the current set of partitions to consume.

func (*Consumer) Unsubscribe

func (c *Consumer) Unsubscribe() (err error)

Unsubscribe from the current subscription, if any.

type ConsumerGroupDescription

ConsumerGroupDescription represents the result of DescribeConsumerGroups for a single group.

type ConsumerGroupDescription struct {
    // Group id.
    GroupID string
    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
    Error Error
    // Is a simple consumer group.
    IsSimpleConsumerGroup bool
    // Partition assignor identifier.
    PartitionAssignor string
    // Consumer group state.
    State ConsumerGroupState
    // Consumer group coordinator (has ID == -1 if not known).
    Coordinator Node
    // Members list.
    Members []MemberDescription
    // Operations allowed for the group (nil if not available or not requested)
    AuthorizedOperations []ACLOperation
}

type ConsumerGroupListing

ConsumerGroupListing represents the result of ListConsumerGroups for a single group.

type ConsumerGroupListing struct {
    // Group id.
    GroupID string
    // Is a simple consumer group.
    IsSimpleConsumerGroup bool
    // Group state.
    State ConsumerGroupState
    // Group type.
    Type ConsumerGroupType
}

type ConsumerGroupMetadata

ConsumerGroupMetadata reflects the current consumer group member metadata.

type ConsumerGroupMetadata struct {
    // contains filtered or unexported fields
}

func NewTestConsumerGroupMetadata

func NewTestConsumerGroupMetadata(groupID string) (*ConsumerGroupMetadata, error)

NewTestConsumerGroupMetadata creates a new consumer group metadata instance mainly for testing use. Use GetConsumerGroupMetadata() to retrieve the real metadata.

type ConsumerGroupResult

ConsumerGroupResult provides per-group operation result (error) information.

type ConsumerGroupResult struct {
    // Group name
    Group string
    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
    Error Error
}

func (ConsumerGroupResult) String

func (g ConsumerGroupResult) String() string

String returns a human-readable representation of a ConsumerGroupResult.

type ConsumerGroupState

ConsumerGroupState represents a consumer group state.

type ConsumerGroupState int

const (
    // ConsumerGroupStateUnknown - Unknown ConsumerGroupState
    ConsumerGroupStateUnknown ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN
    // ConsumerGroupStatePreparingRebalance - preparing rebalance
    ConsumerGroupStatePreparingRebalance ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE
    // ConsumerGroupStateCompletingRebalance - completing rebalance
    ConsumerGroupStateCompletingRebalance ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE
    // ConsumerGroupStateStable - stable
    ConsumerGroupStateStable ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_STABLE
    // ConsumerGroupStateDead - dead group
    ConsumerGroupStateDead ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_DEAD
    // ConsumerGroupStateEmpty - empty group
    ConsumerGroupStateEmpty ConsumerGroupState = C.RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY
)

func ConsumerGroupStateFromString

func ConsumerGroupStateFromString(stateString string) (ConsumerGroupState, error)

ConsumerGroupStateFromString translates a consumer group state name/string to a ConsumerGroupState value.

func (ConsumerGroupState) String

func (t ConsumerGroupState) String() string

String returns the human-readable representation of a ConsumerGroupState.

type ConsumerGroupTopicPartitions

ConsumerGroupTopicPartitions represents a consumer group's TopicPartitions.

type ConsumerGroupTopicPartitions struct {
    // Group name
    Group string
    // Partitions list
    Partitions []TopicPartition
}

func (ConsumerGroupTopicPartitions) String

func (gtp ConsumerGroupTopicPartitions) String() string

type ConsumerGroupType

ConsumerGroupType represents a consumer group type.

type ConsumerGroupType int

const (
    // ConsumerGroupTypeUnknown - Unknown ConsumerGroupType
    ConsumerGroupTypeUnknown ConsumerGroupType = C.RD_KAFKA_CONSUMER_GROUP_TYPE_UNKNOWN
    // ConsumerGroupTypeConsumer - Consumer ConsumerGroupType
    ConsumerGroupTypeConsumer ConsumerGroupType = C.RD_KAFKA_CONSUMER_GROUP_TYPE_CONSUMER
    // ConsumerGroupTypeClassic - Classic ConsumerGroupType
    ConsumerGroupTypeClassic ConsumerGroupType = C.RD_KAFKA_CONSUMER_GROUP_TYPE_CLASSIC
)

func ConsumerGroupTypeFromString

func ConsumerGroupTypeFromString(typeString string) ConsumerGroupType

ConsumerGroupTypeFromString translates a consumer group type name/string to a ConsumerGroupType value.

func (ConsumerGroupType) String

func (t ConsumerGroupType) String() string

String returns the human-readable representation of a ConsumerGroupType.

type CreateACLResult

CreateACLResult provides create ACL error information.

type CreateACLResult struct {
    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
    Error Error
}

type CreateACLsAdminOption

CreateACLsAdminOption - see setter.

See SetAdminRequestTimeout.

type CreateACLsAdminOption interface {
    // contains filtered or unexported methods
}

type CreatePartitionsAdminOption

CreatePartitionsAdminOption - see setters.

See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.

type CreatePartitionsAdminOption interface {
    // contains filtered or unexported methods
}

type CreateTopicsAdminOption

CreateTopicsAdminOption - see setters.

See SetAdminRequestTimeout, SetAdminOperationTimeout, SetAdminValidateOnly.

type CreateTopicsAdminOption interface {
    // contains filtered or unexported methods
}

type DeleteACLsAdminOption

DeleteACLsAdminOption - see setter.

See SetAdminRequestTimeout.

type DeleteACLsAdminOption interface {
    // contains filtered or unexported methods
}

type DeleteACLsResult

DeleteACLsResult provides delete ACLs result or error information.

type DeleteACLsResult = DescribeACLsResult

type DeleteConsumerGroupsAdminOption

DeleteConsumerGroupsAdminOption - see setters.

See SetAdminRequestTimeout.

type DeleteConsumerGroupsAdminOption interface {
    // contains filtered or unexported methods
}

type DeleteConsumerGroupsResult

DeleteConsumerGroupsResult represents the result of a DeleteConsumerGroups call.

type DeleteConsumerGroupsResult struct {
    // Slice of ConsumerGroupResult.
    ConsumerGroupResults []ConsumerGroupResult
}

type DeleteRecordsAdminOption

DeleteRecordsAdminOption - see setter.

See SetAdminRequestTimeout, SetAdminOperationTimeout.

type DeleteRecordsAdminOption interface {
    // contains filtered or unexported methods
}

type DeleteRecordsResult

DeleteRecordsResult represents the result of a DeleteRecords call for a single partition.

type DeleteRecordsResult struct {
    // One of requested partitions.
    // The Error field is set if any occurred for that partition.
    TopicPartition TopicPartition
    // Deleted records information, or nil if an error occurred.
    DeletedRecords *DeletedRecords
}

type DeleteRecordsResults

DeleteRecordsResults represents the results of a DeleteRecords call.

type DeleteRecordsResults struct {
    // A slice of DeleteRecordsResult, one for each requested topic partition.
    DeleteRecordsResults []DeleteRecordsResult
}

type DeleteTopicsAdminOption

DeleteTopicsAdminOption - see setters.

See SetAdminRequestTimeout, SetAdminOperationTimeout.

type DeleteTopicsAdminOption interface {
    // contains filtered or unexported methods
}

type DeletedRecords

DeletedRecords contains information about deleted records of a single partition.

type DeletedRecords struct {
    // Low-watermark offset after deletion
    LowWatermark Offset
}

type DescribeACLsAdminOption

DescribeACLsAdminOption - see setter.

See SetAdminRequestTimeout.

type DescribeACLsAdminOption interface {
    // contains filtered or unexported methods
}

type DescribeACLsResult

DescribeACLsResult provides describe ACLs result or error information.

type DescribeACLsResult struct {
    // Slice of ACL bindings matching the provided filter
    ACLBindings ACLBindings
    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
    Error Error
}

type DescribeClusterAdminOption

DescribeClusterAdminOption - see setter.

See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations.

type DescribeClusterAdminOption interface {
    // contains filtered or unexported methods
}

type DescribeClusterResult

DescribeClusterResult represents the result of DescribeCluster.

type DescribeClusterResult struct {
    // Cluster id for the cluster (always available if broker version >= 0.10.1.0, otherwise nil).
    ClusterID *string
    // Current controller broker for the cluster (nil if there is none).
    Controller *Node
    // List of brokers in the cluster.
    Nodes []Node
    // Operations allowed for the cluster (nil if not available or not requested).
    AuthorizedOperations []ACLOperation
}

type DescribeConfigsAdminOption

DescribeConfigsAdminOption - see setters.

See SetAdminRequestTimeout.

type DescribeConfigsAdminOption interface {
    // contains filtered or unexported methods
}

type DescribeConsumerGroupsAdminOption

DescribeConsumerGroupsAdminOption - see setter.

See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations.

type DescribeConsumerGroupsAdminOption interface {
    // contains filtered or unexported methods
}

type DescribeConsumerGroupsResult

DescribeConsumerGroupsResult represents the result of a DescribeConsumerGroups call.

type DescribeConsumerGroupsResult struct {
    // Slice of ConsumerGroupDescription.
    ConsumerGroupDescriptions []ConsumerGroupDescription
}

type DescribeTopicsAdminOption

DescribeTopicsAdminOption - see setter.

See SetAdminRequestTimeout, SetAdminOptionIncludeAuthorizedOperations.

type DescribeTopicsAdminOption interface {
    // contains filtered or unexported methods
}

type DescribeTopicsResult

DescribeTopicsResult represents the result of a DescribeTopics call.

type DescribeTopicsResult struct {
    // Slice of TopicDescription.
    TopicDescriptions []TopicDescription
}

type DescribeUserScramCredentialsAdminOption

DescribeUserScramCredentialsAdminOption - see setter.

See SetAdminRequestTimeout.

type DescribeUserScramCredentialsAdminOption interface {
    // contains filtered or unexported methods
}

type DescribeUserScramCredentialsResult

DescribeUserScramCredentialsResult represents the result of a DescribeUserScramCredentials call.

type DescribeUserScramCredentialsResult struct {
    // Descriptions - Map from user name
    // to UserScramCredentialsDescription
    Descriptions map[string]UserScramCredentialsDescription
}

type ElectLeadersAdminOption

ElectLeadersAdminOption - see setter.

See SetAdminRequestTimeout, SetAdminOperationTimeout.

type ElectLeadersAdminOption interface {
    // contains filtered or unexported methods
}

type ElectLeadersRequest

ElectLeadersRequest holds parameters for the type of election to be performed and the topic partitions for which election has to be performed.

type ElectLeadersRequest struct {
    // contains filtered or unexported fields
}

func NewElectLeadersRequest

func NewElectLeadersRequest(electionType ElectionType, partitions []TopicPartition) ElectLeadersRequest

NewElectLeadersRequest creates a new ElectLeadersRequest with the given election type and topic partitions.

type ElectLeadersResult

ElectLeadersResult holds the result of the election performed.

type ElectLeadersResult struct {
    // TopicPartitions for which election has been performed and the per-partition error, if any,
    // that occurred while running the election for the specific TopicPartition.
    TopicPartitions []TopicPartition
}

type ElectionType

ElectionType represents the type of election to be performed.

type ElectionType int

const (
    // ElectionTypePreferred - Preferred election type
    ElectionTypePreferred ElectionType = C.RD_KAFKA_ELECTION_TYPE_PREFERRED
    // ElectionTypeUnclean - Unclean election type
    ElectionTypeUnclean ElectionType = C.RD_KAFKA_ELECTION_TYPE_UNCLEAN
)

func ElectionTypeFromString

func ElectionTypeFromString(electionTypeString string) (ElectionType, error)

ElectionTypeFromString translates an election type name to an ElectionType value.

type Error

Error provides a Kafka-specific error container.

type Error struct {
    // contains filtered or unexported fields
}

func NewError

func NewError(code ErrorCode, str string, fatal bool) (err Error)

NewError creates a new Error.

func (Error) Code

func (e Error) Code() ErrorCode

Code returns the ErrorCode of an Error.

func (Error) Error

func (e Error) Error() string

Error returns a human readable representation of an Error. Same as Error.String().

func (Error) IsFatal

func (e Error) IsFatal() bool

IsFatal returns true if the error is a fatal error. A fatal error indicates the client instance is no longer operable and should be terminated. Typical causes include non-recoverable idempotent producer errors.

func (Error) IsRetriable

func (e Error) IsRetriable() bool

IsRetriable returns true if the operation that caused this error may be retried. This flag is currently only set by the Transactional producer API.

func (Error) IsTimeout

func (e Error) IsTimeout() bool

IsTimeout returns true if the error is a timeout error. A timeout error indicates that the operation timed out locally.

func (Error) String

func (e Error) String() string

String returns a human readable representation of an Error.

func (Error) TxnRequiresAbort

func (e Error) TxnRequiresAbort() bool

TxnRequiresAbort returns true if the error is an abortable transaction error that requires the application to abort the current transaction with AbortTransaction() and start a new transaction with BeginTransaction() if it wishes to proceed with transactional operations. This flag is only set by the Transactional producer API.

type ErrorCode

ErrorCode is the integer representation of local and broker error codes.

type ErrorCode int

const (
    // ErrBadMsg Local: Bad message format
    ErrBadMsg ErrorCode = C.RD_KAFKA_RESP_ERR__BAD_MSG
    // ErrBadCompression Local: Invalid compressed data
    ErrBadCompression ErrorCode = C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION
    // ErrDestroy Local: Broker handle destroyed
    ErrDestroy ErrorCode = C.RD_KAFKA_RESP_ERR__DESTROY
    // ErrFail Local: Communication failure with broker
    ErrFail ErrorCode = C.RD_KAFKA_RESP_ERR__FAIL
    // ErrTransport Local: Broker transport failure
    ErrTransport ErrorCode = C.RD_KAFKA_RESP_ERR__TRANSPORT
    // ErrCritSysResource Local: Critical system resource failure
    ErrCritSysResource ErrorCode = C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE
    // ErrResolve Local: Host resolution failure
    ErrResolve ErrorCode = C.RD_KAFKA_RESP_ERR__RESOLVE
    // ErrMsgTimedOut Local: Message timed out
    ErrMsgTimedOut ErrorCode = C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT
    // ErrPartitionEOF Broker: No more messages
    ErrPartitionEOF ErrorCode = C.RD_KAFKA_RESP_ERR__PARTITION_EOF
    // ErrUnknownPartition Local: Unknown partition
    ErrUnknownPartition ErrorCode = C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION
    // ErrFs Local: File or filesystem error
    ErrFs ErrorCode = C.RD_KAFKA_RESP_ERR__FS
    // ErrUnknownTopic Local: Unknown topic
    ErrUnknownTopic ErrorCode = C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC
    // ErrAllBrokersDown Local: All broker connections are down
    ErrAllBrokersDown ErrorCode = C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN
    // ErrInvalidArg Local: Invalid argument or configuration
    ErrInvalidArg ErrorCode = C.RD_KAFKA_RESP_ERR__INVALID_ARG
    // ErrTimedOut Local: Timed out
    ErrTimedOut ErrorCode = C.RD_KAFKA_RESP_ERR__TIMED_OUT
    // ErrQueueFull Local: Queue full
    ErrQueueFull ErrorCode = C.RD_KAFKA_RESP_ERR__QUEUE_FULL
    // ErrIsrInsuff Local: ISR count insufficient
    ErrIsrInsuff ErrorCode = C.RD_KAFKA_RESP_ERR__ISR_INSUFF
    // ErrNodeUpdate Local: Broker node update
    ErrNodeUpdate ErrorCode = C.RD_KAFKA_RESP_ERR__NODE_UPDATE
    // ErrSsl Local: SSL error
    ErrSsl ErrorCode = C.RD_KAFKA_RESP_ERR__SSL
    // ErrWaitCoord Local: Waiting for coordinator
    ErrWaitCoord ErrorCode = C.RD_KAFKA_RESP_ERR__WAIT_COORD
    // ErrUnknownGroup Local: Unknown group
    ErrUnknownGroup ErrorCode = C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP
    // ErrInProgress Local: Operation in progress
    ErrInProgress ErrorCode = C.RD_KAFKA_RESP_ERR__IN_PROGRESS
    // ErrPrevInProgress Local: Previous operation in progress
    ErrPrevInProgress ErrorCode = C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS
    // ErrExistingSubscription Local: Existing subscription
    ErrExistingSubscription ErrorCode = C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION
    // ErrAssignPartitions Local: Assign partitions
    ErrAssignPartitions ErrorCode = C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
    // ErrRevokePartitions Local: Revoke partitions
    ErrRevokePartitions ErrorCode = C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
    // ErrConflict Local: Conflicting use
    ErrConflict ErrorCode = C.RD_KAFKA_RESP_ERR__CONFLICT
    // ErrState Local: Erroneous state
    ErrState ErrorCode = C.RD_KAFKA_RESP_ERR__STATE
    // ErrUnknownProtocol Local: Unknown protocol
    ErrUnknownProtocol ErrorCode = C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL
    // ErrNotImplemented Local: Not implemented
    ErrNotImplemented ErrorCode = C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED
    // ErrAuthentication Local: Authentication failure
    ErrAuthentication ErrorCode = C.RD_KAFKA_RESP_ERR__AUTHENTICATION
    // ErrNoOffset Local: No offset stored
    ErrNoOffset ErrorCode = C.RD_KAFKA_RESP_ERR__NO_OFFSET
    // ErrOutdated Local: Outdated
    ErrOutdated ErrorCode = C.RD_KAFKA_RESP_ERR__OUTDATED
    // ErrTimedOutQueue Local: Timed out in queue
    ErrTimedOutQueue ErrorCode = C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE
    // ErrUnsupportedFeature Local: Required feature not supported by broker
    ErrUnsupportedFeature ErrorCode = C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE
    // ErrWaitCache Local: Awaiting cache update
    ErrWaitCache ErrorCode = C.RD_KAFKA_RESP_ERR__WAIT_CACHE
    // ErrIntr Local: Operation interrupted
    ErrIntr ErrorCode = C.RD_KAFKA_RESP_ERR__INTR
    // ErrKeySerialization Local: Key serialization error
    ErrKeySerialization ErrorCode = C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION
    // ErrValueSerialization Local: Value serialization error
    ErrValueSerialization ErrorCode = C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION
    // ErrKeyDeserialization Local: Key deserialization error
+    ErrKeyDeserialization ErrorCode = C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION
+    // ErrValueDeserialization Local: Value deserialization error
+    ErrValueDeserialization ErrorCode = C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION
+    // ErrPartial Local: Partial response
+    ErrPartial ErrorCode = C.RD_KAFKA_RESP_ERR__PARTIAL
+    // ErrReadOnly Local: Read-only object
+    ErrReadOnly ErrorCode = C.RD_KAFKA_RESP_ERR__READ_ONLY
+    // ErrNoent Local: No such entry
+    ErrNoent ErrorCode = C.RD_KAFKA_RESP_ERR__NOENT
+    // ErrUnderflow Local: Read underflow
+    ErrUnderflow ErrorCode = C.RD_KAFKA_RESP_ERR__UNDERFLOW
+    // ErrInvalidType Local: Invalid type
+    ErrInvalidType ErrorCode = C.RD_KAFKA_RESP_ERR__INVALID_TYPE
+    // ErrRetry Local: Retry operation
+    ErrRetry ErrorCode = C.RD_KAFKA_RESP_ERR__RETRY
+    // ErrPurgeQueue Local: Purged in queue
+    ErrPurgeQueue ErrorCode = C.RD_KAFKA_RESP_ERR__PURGE_QUEUE
+    // ErrPurgeInflight Local: Purged in flight
+    ErrPurgeInflight ErrorCode = C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT
+    // ErrFatal Local: Fatal error
+    ErrFatal ErrorCode = C.RD_KAFKA_RESP_ERR__FATAL
+    // ErrInconsistent Local: Inconsistent state
+    ErrInconsistent ErrorCode = C.RD_KAFKA_RESP_ERR__INCONSISTENT
+    // ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding
+    ErrGaplessGuarantee ErrorCode = C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE
+    // ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded
+    ErrMaxPollExceeded ErrorCode = C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED
+    // ErrUnknownBroker Local: Unknown broker
+    ErrUnknownBroker ErrorCode = C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER
+    // ErrNotConfigured Local: Functionality not configured
+    ErrNotConfigured ErrorCode = C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED
+    // ErrFenced Local: This instance has been fenced by a newer instance
+    ErrFenced ErrorCode = C.RD_KAFKA_RESP_ERR__FENCED
+    // ErrApplication Local: Application generated error
+    ErrApplication ErrorCode = C.RD_KAFKA_RESP_ERR__APPLICATION
+    // ErrAssignmentLost Local: Group partition assignment lost
+    ErrAssignmentLost ErrorCode = C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST
+    // ErrNoop Local: No operation performed
+    ErrNoop ErrorCode = C.RD_KAFKA_RESP_ERR__NOOP
+    // ErrAutoOffsetReset Local: No offset to automatically reset to
+    ErrAutoOffsetReset ErrorCode = C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET
+    // ErrLogTruncation Local: Partition log truncation detected
+    ErrLogTruncation ErrorCode = C.RD_KAFKA_RESP_ERR__LOG_TRUNCATION
+    // ErrInvalidDifferentRecord Local: an invalid record in the same batch caused the failure of this message too.
+    ErrInvalidDifferentRecord ErrorCode = C.RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD
+    // ErrUnknown Unknown broker error
+    ErrUnknown ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN
+    // ErrNoError Success
+    ErrNoError ErrorCode = C.RD_KAFKA_RESP_ERR_NO_ERROR
+    // ErrOffsetOutOfRange Broker: Offset out of range
+    ErrOffsetOutOfRange ErrorCode = C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE
+    // ErrInvalidMsg Broker: Invalid message
+    ErrInvalidMsg ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_MSG
+    // ErrUnknownTopicOrPart Broker: Unknown topic or partition
+    ErrUnknownTopicOrPart ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART
+    // ErrInvalidMsgSize Broker: Invalid message size
+    ErrInvalidMsgSize ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE
+    // ErrLeaderNotAvailable Broker: Leader not available
+    ErrLeaderNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE
+    // ErrNotLeaderForPartition Broker: Not leader for partition
+    ErrNotLeaderForPartition ErrorCode = C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION
+    // ErrRequestTimedOut Broker: Request timed out
+    ErrRequestTimedOut ErrorCode = C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT
+    // ErrBrokerNotAvailable Broker: Broker not available
+    ErrBrokerNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE
+    // ErrReplicaNotAvailable Broker: Replica not available
+    ErrReplicaNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE
+    // ErrMsgSizeTooLarge Broker: Message size too large
+    ErrMsgSizeTooLarge ErrorCode = C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE
+    // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode
+    ErrStaleCtrlEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH
+    // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large
+    ErrOffsetMetadataTooLarge ErrorCode = C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE
+    // ErrNetworkException Broker: Broker disconnected before response received
+    ErrNetworkException ErrorCode = C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION
+    // ErrCoordinatorLoadInProgress Broker: Coordinator load in progress
+    ErrCoordinatorLoadInProgress ErrorCode = C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS
+    // ErrCoordinatorNotAvailable Broker: Coordinator not available
+    ErrCoordinatorNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE
+    // ErrNotCoordinator Broker: Not coordinator
+    ErrNotCoordinator ErrorCode = C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR
+    // ErrTopicException Broker: Invalid topic
+    ErrTopicException ErrorCode = C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION
+    // ErrRecordListTooLarge Broker: Message batch larger than configured server segment size
+    ErrRecordListTooLarge ErrorCode = C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE
+    // ErrNotEnoughReplicas Broker: Not enough in-sync replicas
+    ErrNotEnoughReplicas ErrorCode = C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS
+    // ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas
+    ErrNotEnoughReplicasAfterAppend ErrorCode = C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND
+    // ErrInvalidRequiredAcks Broker: Invalid required acks value
+    ErrInvalidRequiredAcks ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS
+    // ErrIllegalGeneration Broker: Specified group generation id is not valid
+    ErrIllegalGeneration ErrorCode = C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION
+    // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol
+    ErrInconsistentGroupProtocol ErrorCode = C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL
+    // ErrInvalidGroupID Broker: Invalid group.id
+    ErrInvalidGroupID ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID
+    // ErrUnknownMemberID Broker: Unknown member
+    ErrUnknownMemberID ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID
+    // ErrInvalidSessionTimeout Broker: Invalid session timeout
+    ErrInvalidSessionTimeout ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT
+    // ErrRebalanceInProgress Broker: Group rebalance in progress
+    ErrRebalanceInProgress ErrorCode = C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS
+    // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid
+    ErrInvalidCommitOffsetSize ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE
+    // ErrTopicAuthorizationFailed Broker: Topic authorization failed
+    ErrTopicAuthorizationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED
+    // ErrGroupAuthorizationFailed Broker: Group authorization failed
+    ErrGroupAuthorizationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED
+    // ErrClusterAuthorizationFailed Broker: Cluster authorization failed
+    ErrClusterAuthorizationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED
+    // ErrInvalidTimestamp Broker: Invalid timestamp
+    ErrInvalidTimestamp ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP
+    // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism
+    ErrUnsupportedSaslMechanism ErrorCode = C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM
+    // ErrIllegalSaslState Broker: Request not valid in current SASL state
+    ErrIllegalSaslState ErrorCode = C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE
+    // ErrUnsupportedVersion Broker: API version not supported
+    ErrUnsupportedVersion ErrorCode = C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION
+    // ErrTopicAlreadyExists Broker: Topic already exists
+    ErrTopicAlreadyExists ErrorCode = C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS
+    // ErrInvalidPartitions Broker: Invalid number of partitions
+    ErrInvalidPartitions ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS
+    // ErrInvalidReplicationFactor Broker: Invalid replication factor
+    ErrInvalidReplicationFactor ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR
+    // ErrInvalidReplicaAssignment Broker: Invalid replica assignment
+    ErrInvalidReplicaAssignment ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT
+    // ErrInvalidConfig Broker: Configuration is invalid
+    ErrInvalidConfig ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_CONFIG
+    // ErrNotController Broker: Not controller for cluster
+    ErrNotController ErrorCode = C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER
+    // ErrInvalidRequest Broker: Invalid request
+    ErrInvalidRequest ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_REQUEST
+    // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request
+    ErrUnsupportedForMessageFormat ErrorCode = C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT
+    // ErrPolicyViolation Broker: Policy violation
+    ErrPolicyViolation ErrorCode = C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION
+    // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number
+    ErrOutOfOrderSequenceNumber ErrorCode = C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER
+    // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number
+    ErrDuplicateSequenceNumber ErrorCode = C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER
+    // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch
+    ErrInvalidProducerEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH
+    // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state
+    ErrInvalidTxnState ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE
+    // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id
+    ErrInvalidProducerIDMapping ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING
+    // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
+    ErrInvalidTransactionTimeout ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT
+    // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
+    ErrConcurrentTransactions ErrorCode = C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS
+    // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
+    ErrTransactionCoordinatorFenced ErrorCode = C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED
+    // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed
+    ErrTransactionalIDAuthorizationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED
+    // ErrSecurityDisabled Broker: Security features are disabled
+    ErrSecurityDisabled ErrorCode = C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED
+    // ErrOperationNotAttempted Broker: Operation not attempted
+    ErrOperationNotAttempted ErrorCode = C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED
+    // ErrKafkaStorageError Broker: Disk error when trying to access log file on disk
+    ErrKafkaStorageError ErrorCode = C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR
+    // ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config
+    ErrLogDirNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND
+    // ErrSaslAuthenticationFailed Broker: SASL Authentication failed
+    ErrSaslAuthenticationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED
+    // ErrUnknownProducerID Broker: Unknown Producer Id
+    ErrUnknownProducerID ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID
+    // ErrReassignmentInProgress Broker: Partition reassignment is in progress
+    ErrReassignmentInProgress ErrorCode = C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS
+    // ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled
+    ErrDelegationTokenAuthDisabled ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED
+    // ErrDelegationTokenNotFound Broker: Delegation Token is not found on server
+    ErrDelegationTokenNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND
+    // ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer
+    ErrDelegationTokenOwnerMismatch ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH
+    // ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection
+    ErrDelegationTokenRequestNotAllowed ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED
+    // ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed
+    ErrDelegationTokenAuthorizationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED
+    // ErrDelegationTokenExpired Broker: Delegation Token is expired
+    ErrDelegationTokenExpired ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED
+    // ErrInvalidPrincipalType Broker: Supplied principalType is not supported
+    ErrInvalidPrincipalType ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE
+    // ErrNonEmptyGroup Broker: The group is not empty
+    ErrNonEmptyGroup ErrorCode = C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP
+    // ErrGroupIDNotFound Broker: The group id does not exist
+    ErrGroupIDNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND
+    // ErrFetchSessionIDNotFound Broker: The fetch session ID was not found
+    ErrFetchSessionIDNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND
+    // ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid
+    ErrInvalidFetchSessionEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH
+    // ErrListenerNotFound Broker: No matching listener
+    ErrListenerNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND
+    // ErrTopicDeletionDisabled Broker: Topic deletion is disabled
+    ErrTopicDeletionDisabled ErrorCode = C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED
+    // ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch
+    ErrFencedLeaderEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH
+    // ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch
+    ErrUnknownLeaderEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH
+    // ErrUnsupportedCompressionType Broker: Unsupported compression type
+    ErrUnsupportedCompressionType ErrorCode = C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE
+    // ErrStaleBrokerEpoch Broker: Broker epoch has changed
+    ErrStaleBrokerEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH
+    // ErrOffsetNotAvailable Broker: Leader high watermark is not caught up
+    ErrOffsetNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE
+    // ErrMemberIDRequired Broker: Group member needs a valid member ID
+    ErrMemberIDRequired ErrorCode = C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED
+    // ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available
+    ErrPreferredLeaderNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE
+    // ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size
+    ErrGroupMaxSizeReached ErrorCode = C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED
+    // ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id
+    ErrFencedInstanceID ErrorCode = C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID
+    // ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available
+    ErrEligibleLeadersNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE
+    // ErrElectionNotNeeded Broker: Leader election not needed for topic partition
+    ErrElectionNotNeeded ErrorCode = C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED
+    // ErrNoReassignmentInProgress Broker: No partition reassignment is in progress
+    ErrNoReassignmentInProgress ErrorCode = C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS
+    // ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it
+    ErrGroupSubscribedToTopic ErrorCode = C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC
+    // ErrInvalidRecord Broker: Broker failed to validate record
+    ErrInvalidRecord ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_RECORD
+    // ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared
+    ErrUnstableOffsetCommit ErrorCode = C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT
+    // ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded
+    ErrThrottlingQuotaExceeded ErrorCode = C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED
+    // ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one
+    ErrProducerFenced ErrorCode = C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED
+    // ErrResourceNotFound Broker: Request illegally referred to resource that does not exist
+    ErrResourceNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND
+    // ErrDuplicateResource Broker: Request illegally referred to the same resource twice
+    ErrDuplicateResource ErrorCode = C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE
+    // ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability
+    ErrUnacceptableCredential ErrorCode = C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL
+    // ErrInconsistentVoterSet Broker: Indicates that either the sender or recipient of a voter-only request is not one of the expected voters
+    ErrInconsistentVoterSet ErrorCode = C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET
+    // ErrInvalidUpdateVersion Broker: Invalid update version
+    ErrInvalidUpdateVersion ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION
+    // ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error
+    ErrFeatureUpdateFailed ErrorCode = C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED
+    // ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding
+    ErrPrincipalDeserializationFailure ErrorCode = C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE
+    // ErrUnknownTopicID Broker: Unknown topic id
+    ErrUnknownTopicID ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID
+    // ErrFencedMemberEpoch Broker: The member epoch is fenced by the group coordinator
+    ErrFencedMemberEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH
+    // ErrUnreleasedInstanceID Broker: The instance ID is still used by another member in the consumer group
+    ErrUnreleasedInstanceID ErrorCode = C.RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID
+    // ErrUnsupportedAssignor Broker: The assignor or its version range is not supported by the consumer group
+    ErrUnsupportedAssignor ErrorCode = C.RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR
+    // ErrStaleMemberEpoch Broker: The member epoch is stale
+    ErrStaleMemberEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH
+    // ErrUnknownSubscriptionID Broker: Client sent a push telemetry request with an invalid or outdated subscription ID
+    ErrUnknownSubscriptionID ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID
+    // ErrTelemetryTooLarge Broker: Client sent a push telemetry request larger than the maximum size the broker will accept
+    ErrTelemetryTooLarge ErrorCode = C.RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE
+)
+

+ func (ErrorCode) String

+ func (c ErrorCode) String() string

+ String returns a human readable representation of an error code

+ type Event

+ Event generic interface

type Event interface {
+    // String returns a human-readable representation of the event
+    String() string
+}
+

+ type Handle

+ Handle represents a generic client handle containing common parts for both Producer and Consumer.

type Handle interface {
+    // SetOAuthBearerToken sets the data to be transmitted
+    // to a broker during SASL/OAUTHBEARER authentication. It will return nil
+    // on success, otherwise an error if:
+    // 1) the token data is invalid (meaning an expiration time in the past
+    // or either a token value or an extension key or value that does not meet
+    // the regular expression requirements as per
+    // https://tools.ietf.org/html/rfc7628#section-3.1);
+    // 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
+    // 3) SASL/OAUTHBEARER is supported but is not configured as the client's
+    // authentication mechanism.
+    SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error
+
+    // SetOAuthBearerTokenFailure sets the error message describing why token
+    // retrieval/setting failed; it also schedules a new token refresh event for 10
+    // seconds later so the attempt may be retried. It will return nil on
+    // success, otherwise an error if:
+    // 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build;
+    // 2) SASL/OAUTHBEARER is supported but is not configured as the client's
+    // authentication mechanism.
+    SetOAuthBearerTokenFailure(errstr string) error
+
+    // IsClosed() returns the bool to check if the client is closed
+    IsClosed() bool
+    // contains filtered or unexported methods
+}
+ type Header

+ Header represents a single Kafka message header.

+ Message headers are made up of a list of Header elements, retaining their original insert order and allowing for duplicate Keys.

+ Key is a human readable string identifying the header. Value is the key's binary value; Kafka does not put any restrictions on the format of the Value, but it should be made relatively compact. The value may be a byte array, empty, or nil.

+ NOTE: Message headers are not available on producer delivery report messages.

type Header struct {
+    Key   string // Header name (utf-8 string)
+    Value []byte // Header value (nil, empty, or binary)
+}
+
+

+ func (Header) String

+ func (h Header) String() string

+ String returns the Header Key and data in a human representable possibly truncated form suitable for displaying to the user.
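+ For illustration, a hedged sketch of attaching headers when producing (the topic name and values are placeholders of ours; a *kafka.Producer named producer is assumed to exist):

+    // assumes: import "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+    topic := "metrics" // placeholder topic
+    msg := &kafka.Message{
+        TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+        Value:          []byte(`{"name":"up","value":1}`),
+        Headers: []kafka.Header{
+            {Key: "content-type", Value: []byte("application/json")},
+            {Key: "trace-id", Value: nil}, // nil values are permitted
+        },
+    }
+    _ = producer.Produce(msg, nil) // delivery report arrives on Events()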

+ type IsolationLevel

+ IsolationLevel is a type which is used for AdminOptions to set the IsolationLevel.

type IsolationLevel int
+
const (
+    // IsolationLevelReadUncommitted - read uncommitted isolation level
+    IsolationLevelReadUncommitted IsolationLevel = C.RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED
+    // IsolationLevelReadCommitted - read committed isolation level
+    IsolationLevelReadCommitted IsolationLevel = C.RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED
+)
+

+ type ListConsumerGroupOffsetsAdminOption

+ ListConsumerGroupOffsetsAdminOption - see setter.

+ See SetAdminRequestTimeout, SetAdminRequireStableOffsets.

type ListConsumerGroupOffsetsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type ListConsumerGroupOffsetsResult

+ ListConsumerGroupOffsetsResult represents the result of a ListConsumerGroupOffsets operation.

type ListConsumerGroupOffsetsResult struct {
+    // A slice of ConsumerGroupTopicPartitions, each element represents a group's
+    // TopicPartitions and Offsets.
+    ConsumerGroupsTopicPartitions []ConsumerGroupTopicPartitions
+}
+
+

+ type ListConsumerGroupsAdminOption

+ ListConsumerGroupsAdminOption - see setter.

+ See SetAdminRequestTimeout, SetAdminMatchConsumerGroupStates, SetAdminMatchConsumerGroupTypes.

type ListConsumerGroupsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type ListConsumerGroupsResult

+ ListConsumerGroupsResult represents ListConsumerGroups results and errors.

type ListConsumerGroupsResult struct {
+    // List of valid ConsumerGroupListings.
+    Valid []ConsumerGroupListing
+    // List of errors.
+    Errors []error
+}
+
+

+ type ListOffsetsAdminOption

+ ListOffsetsAdminOption - see setter.

+ See SetAdminRequestTimeout, SetAdminIsolationLevel.

type ListOffsetsAdminOption interface {
+    // contains filtered or unexported methods
+}
+

+ type ListOffsetsResult

+ ListOffsetsResult holds the map of TopicPartition to ListOffsetsResultInfo for a request.

type ListOffsetsResult struct {
+    ResultInfos map[TopicPartition]ListOffsetsResultInfo
+}
+
+

+ type ListOffsetsResultInfo

+ ListOffsetsResultInfo describes the result of a ListOffsets request for a Topic Partition.

type ListOffsetsResultInfo struct {
+    Offset      Offset
+    Timestamp   int64
+    LeaderEpoch *int32
+    Error       Error
+}
+
+

+ type LogEvent

+ LogEvent represents the log from librdkafka internal log queue

type LogEvent struct {
+    Name      string    // Name of client instance
+    Tag       string    // Log tag that provides context to the log Message (e.g., "METADATA" or "GRPCOORD")
+    Message   string    // Log message
+    Level     int       // Log syslog level, lower is more critical.
+    Timestamp time.Time // Log timestamp
+}
+
+

+ func (LogEvent) String

+ func (logEvent LogEvent) String() string
+

+ type MemberAssignment

+ MemberAssignment represents the assignment of a consumer group member.

type MemberAssignment struct {
+    // Partitions assigned to current member.
+    TopicPartitions []TopicPartition
+}
+
+

+ type MemberDescription

+ MemberDescription represents the description of a consumer group member.

type MemberDescription struct {
+    // Client id.
+    ClientID string
+    // Group instance id.
+    GroupInstanceID string
+    // Consumer id.
+    ConsumerID string
+    // Group member host.
+    Host string
+    // Member assignment.
+    Assignment MemberAssignment
+}
+
+

+ type Message

+ Message represents a Kafka message

type Message struct {
+    TopicPartition TopicPartition
+    Value          []byte
+    Key            []byte
+    Timestamp      time.Time
+    TimestampType  TimestampType
+    Opaque         interface{}
+    Headers        []Header
+    LeaderEpoch    *int32 // Deprecated: LeaderEpoch or nil if not available. Use m.TopicPartition.LeaderEpoch instead.
+}
+
+

+ func (*Message) String

+ func (m *Message) String() string

+ String returns a human readable representation of a Message. Key and payload are not represented.

+ type Metadata

+ Metadata contains broker and topic metadata for all (matching) topics

type Metadata struct {
+    Brokers []BrokerMetadata
+    Topics  map[string]TopicMetadata
+
+    OriginatingBroker BrokerMetadata
+}
+
+

+ type MockCluster

+ MockCluster represents a Kafka mock cluster instance which can be used for testing.

type MockCluster struct {
+    // contains filtered or unexported fields
+}
+
+

+ func NewMockCluster

+ func NewMockCluster(brokerCount int) (*MockCluster, error)

+ NewMockCluster provides a mock Kafka cluster with a configurable number of brokers that support a reasonable subset of Kafka protocol operations, error injection, etc.

+ The broker ids will start at 1 up to and including brokerCount.

+ Mock clusters provide localhost listeners that can be used as the bootstrap servers by multiple Kafka client instances.

+ Currently supported functionality:
+ - Producer
+ - Idempotent Producer
+ - Transactional Producer
+ - Low-level consumer
+ - High-level balanced consumer groups with offset commits
+ - Topic Metadata and auto creation

+ Warning: THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
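+ A minimal test sketch under the above caveats (experimental API; broker count is arbitrary and error handling is abbreviated):

+    // assumes: import "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+    mc, err := kafka.NewMockCluster(3) // three mock brokers
+    if err != nil {
+        panic(err)
+    }
+    defer mc.Close()
+
+    // Point a real client at the mock cluster's localhost listeners.
+    p, err := kafka.NewProducer(&kafka.ConfigMap{
+        "bootstrap.servers": mc.BootstrapServers(),
+    })
+    if err != nil {
+        panic(err)
+    }
+    defer p.Close()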

+ func (*MockCluster) BootstrapServers

+ func (mc *MockCluster) BootstrapServers() string

+ BootstrapServers returns the bootstrap.servers property for this MockCluster

+ func (*MockCluster) Close

+ func (mc *MockCluster) Close()

+ Close and destroy the MockCluster

+ func (*MockCluster) CreateTopic

+ func (mc *MockCluster) CreateTopic(topic string, partitions, replicationFactor int) error

+ CreateTopic creates a topic without having to use a producer

+ func (*MockCluster) SetBrokerDown

+ func (mc *MockCluster) SetBrokerDown(brokerID int) error

+ SetBrokerDown disconnects the broker and disallows any new connections. This does NOT trigger leader change. Use brokerID -1 for all brokers, or >= 0 for a specific broker.

+ func (*MockCluster) SetBrokerUp

+ func (mc *MockCluster) SetBrokerUp(brokerID int) error

+ SetBrokerUp makes the broker accept connections again. This does NOT trigger leader change. Use brokerID -1 for all brokers, or >= 0 for a specific broker.

+ func (*MockCluster) SetRoundtripDuration

+ func (mc *MockCluster) SetRoundtripDuration(brokerID int, duration time.Duration) error

+ SetRoundtripDuration sets the broker round-trip-time delay for the given broker. Use brokerID -1 for all brokers, or >= 0 for a specific broker.

+ type Node

+ Node represents a Kafka broker.

type Node struct {
+    // Node id.
+    ID int
+    // Node host.
+    Host string
+    // Node port.
+    Port int
+    // Node rack (may be nil)
+    Rack *string
+}
+
+

+ func (Node) String

+ func (n Node) String() string

+ type OAuthBearerToken

+ OAuthBearerToken represents the data to be transmitted to a broker during SASL/OAUTHBEARER authentication.

type OAuthBearerToken struct {
+    // Token value, often (but not necessarily) a JWS compact serialization
+    // as per https://tools.ietf.org/html/rfc7515#section-3.1; it must meet
+    // the regular expression for a SASL/OAUTHBEARER value defined at
+    // https://tools.ietf.org/html/rfc7628#section-3.1
+    TokenValue string
+    // Metadata about the token indicating when it expires (local time);
+    // it must represent a time in the future
+    Expiration time.Time
+    // Metadata about the token indicating the Kafka principal name
+    // to which it applies (for example, "admin")
+    Principal string
+    // SASL extensions, if any, to be communicated to the broker during
+    // authentication (all keys and values of which must meet the regular
+    // expressions defined at https://tools.ietf.org/html/rfc7628#section-3.1,
+    // and it must not contain the reserved "auth" key)
+    Extensions map[string]string
+}
+
+
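+ A hedged sketch of supplying a token, typically in response to an OAuthBearerTokenRefresh event (the token retrieval itself is assumed to happen elsewhere; all values shown are placeholders, and producer is an assumed *kafka.Producer):

+    // assumes: import "time" and "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+    token := kafka.OAuthBearerToken{
+        TokenValue: "eyJhbGciOi...", // placeholder JWS compact serialization
+        Expiration: time.Now().Add(30 * time.Minute),
+        Principal:  "admin",
+        Extensions: map[string]string{"logicalCluster": "lkc-123"}, // optional placeholder
+    }
+    if err := producer.SetOAuthBearerToken(token); err != nil {
+        // Tell librdkafka the refresh failed; it schedules a retry in 10s.
+        _ = producer.SetOAuthBearerTokenFailure(err.Error())
+    }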

+ type OAuthBearerTokenRefresh

+ OAuthBearerTokenRefresh indicates token refresh is required

type OAuthBearerTokenRefresh struct {
+    // Config is the value of the sasl.oauthbearer.config property
+    Config string
+}
+
+

+ func (OAuthBearerTokenRefresh) String

+ func (o OAuthBearerTokenRefresh) String() string

+ type Offset

+ Offset type (int64) with support for canonical names

type Offset int64
+

+ func NewOffset

+ func NewOffset(offset interface{}) (Offset, error)

+ NewOffset creates a new Offset using the provided logical string, an absolute int64 offset value, or a concrete Offset type. Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored"
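+ For example (a small sketch; all three forms below produce ordinary Offset values, with OffsetTail documented just after this):

+    // assumes: import "fmt" and "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+    o1, _ := kafka.NewOffset("earliest") // logical offset
+    o2, _ := kafka.NewOffset(int64(42))  // absolute offset
+    o3 := kafka.OffsetTail(10)           // 10 messages before the current end of partition
+    fmt.Println(o1, o2, o3)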

+ func OffsetTail

+ func OffsetTail(relativeOffset Offset) Offset

+ OffsetTail returns the logical offset relativeOffset from current end of partition

+ func (*Offset) Set

+ func (o *Offset) Set(offset interface{}) error

+ Set offset value, see NewOffset()

+ func (Offset) String

+ func (o Offset) String() string

+ type OffsetSpec

+ OffsetSpec specifies desired offsets while using ListOffsets.

type OffsetSpec int64
+
const (
+    // MaxTimestampOffsetSpec is used to describe the offset with the Max Timestamp, which may differ from LatestOffsetSpec as the Timestamp can be set client side.
+    MaxTimestampOffsetSpec OffsetSpec = C.RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP
+    // EarliestOffsetSpec is used to describe the earliest offset for the TopicPartition.
+    EarliestOffsetSpec OffsetSpec = C.RD_KAFKA_OFFSET_SPEC_EARLIEST
+    // LatestOffsetSpec is used to describe the latest offset for the TopicPartition.
+    LatestOffsetSpec OffsetSpec = C.RD_KAFKA_OFFSET_SPEC_LATEST
+)
+

+ func NewOffsetSpecForTimestamp

+ func NewOffsetSpecForTimestamp(timestamp int64) OffsetSpec

+ NewOffsetSpecForTimestamp creates an OffsetSpec corresponding to the timestamp.

+ type OffsetsCommitted

+ OffsetsCommitted reports committed offsets

type OffsetsCommitted struct {
+    Error   error
+    Offsets []TopicPartition
+}
+
+

+ func (OffsetsCommitted) String

+ func (o OffsetsCommitted) String() string

+ type PartitionEOF

+ PartitionEOF consumer reached end of partition. Needs to be explicitly enabled by setting the `enable.partition.eof` configuration property to true.

type PartitionEOF TopicPartition
+

+ func (PartitionEOF) String

+ func (p PartitionEOF) String() string

+ type PartitionMetadata

+ PartitionMetadata contains per-partition metadata

type PartitionMetadata struct {
+    ID       int32
+    Error    Error
+    Leader   int32
+    Replicas []int32
+    Isrs     []int32
+}
+
+

+ type PartitionsSpecification

+ PartitionsSpecification holds parameters for creating additional partitions for a topic. PartitionsSpecification is analogous to NewPartitions in the Java Topic Admin API.

type PartitionsSpecification struct {
+    // Topic to create more partitions for.
+    Topic string
+    // New partition count for topic, must be higher than current partition count.
+    IncreaseTo int
+    // (Optional) Explicit replica assignment. The outer array is
+    // indexed by the new partition index (i.e., 0 for the first added
+    // partition), while the inner per-partition array
+    // contains the replica broker ids. The first broker in each
+    // broker id list will be the preferred replica.
+    ReplicaAssignment [][]int32
+}
+
+

+ type Producer

+ Producer implements a High-level Apache Kafka Producer instance

type Producer struct {
+    // contains filtered or unexported fields
+}
+
+

+ func NewProducer

+ func NewProducer(conf *ConfigMap) (*Producer, error)

+ NewProducer creates a new high-level Producer instance.

+ conf is a *ConfigMap with standard librdkafka configuration properties.

+ Supported special configuration properties (type, default):

go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance).
+                                  These batches do not relate to Kafka message batches in any way.
+                                  Note: timestamps and headers are not supported with this interface.
+go.delivery.reports (bool, true) - Forward per-message delivery reports to the
+                                   Events() channel.
+go.delivery.report.fields (string, "key,value") - Comma separated list of fields to enable for delivery reports.
+                                    Allowed values: all, none (or empty string), key, value, headers
+                                    Warning: There is a performance penalty to include headers in the delivery report.
+go.events.channel.size (int, 1000000) - Events() channel size (in number of events)
+go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages)
+go.logs.channel.enable (bool, false) - Forward log to Logs() channel.
+go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true.
+
+
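+ A minimal construction sketch combining two of the special Go properties above with standard librdkafka properties (the broker address is a placeholder):

+    // assumes: import "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+    p, err := kafka.NewProducer(&kafka.ConfigMap{
+        "bootstrap.servers":      "localhost:9092", // placeholder
+        "go.delivery.reports":    true,             // special Go property
+        "go.logs.channel.enable": true,             // forward logs to Logs()
+        "batch.num.messages":     10000,            // standard librdkafka property
+    })
+    if err != nil {
+        panic(err)
+    }
+    defer p.Close()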

+ func (*Producer) AbortTransaction

+ func (p *Producer) AbortTransaction(ctx context.Context) error

+ AbortTransaction aborts the ongoing transaction.

+ This function should also be used to recover from non-fatal abortable transaction errors.

+ Any outstanding messages will be purged and fail with `ErrPurgeInflight` or `ErrPurgeQueue`.

+ Parameters:

+   • `ctx` - The maximum amount of time to block, or nil for indefinite.

+ Note: This function will block until all outstanding messages are purged and the transaction abort request has been successfully handled by the transaction coordinator, or until the `ctx` expires, whichever comes first. On timeout the application may call the function again.

+ Note: Will automatically call `Purge()` and `Flush()` to ensure all queued and in-flight messages are purged before attempting to abort the transaction. The application MUST serve the `producer.Events()` channel for delivery reports in a separate go-routine during this time.

+ Returns nil on success or an error object on failure. Check whether the returned error object permits retrying by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal error has been raised by calling `err.(kafka.Error).IsFatal()`.

+ func (*Producer) BeginTransaction

+ func (p *Producer) BeginTransaction() error

+ BeginTransaction starts a new transaction.

+ `InitTransactions()` must have been called successfully (once) before this function is called.

+ Upon successful return from this function the application has to perform at least one of the following operations within `transaction.timeout.ms` to avoid timing out the transaction on the broker:

+   • `Produce()` (et al.)
+   • `SendOffsetsToTransaction()`
+   • `CommitTransaction()`
+   • `AbortTransaction()`

+ Any messages produced, offsets sent (`SendOffsetsToTransaction()`), etc, after the successful return of this function will be part of the transaction and committed or aborted atomically.

+ Finish the transaction by calling `CommitTransaction()` or abort the transaction by calling `AbortTransaction()`.

+ Returns nil on success or an error object on failure. Check whether a fatal error has been raised by calling `err.(kafka.Error).IsFatal()`.

+ Note: With the transactional producer, `Produce()`, et al., are only allowed during an on-going transaction, as started with this function. Any produce call outside an on-going transaction, or for a failed transaction, will fail.
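+ A hedged end-to-end sketch of this lifecycle (assumes a producer p configured with a `transactional.id`; topic and payload are placeholders, and error handling is abbreviated):

+    // assumes: import "context" and "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+    ctx := context.Background()
+    if err := p.InitTransactions(ctx); err != nil {
+        panic(err)
+    }
+    if err := p.BeginTransaction(); err != nil {
+        panic(err)
+    }
+    topic := "metrics" // placeholder
+    _ = p.Produce(&kafka.Message{
+        TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+        Value:          []byte("payload"),
+    }, nil)
+    if err := p.CommitTransaction(ctx); err != nil {
+        if ke, ok := err.(kafka.Error); ok && ke.TxnRequiresAbort() {
+            _ = p.AbortTransaction(ctx) // then BeginTransaction() for the next attempt
+        }
+    }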

+ func (*Producer) Close

+ func (p *Producer) Close()

+ Close a Producer instance. The Producer object or its channels are no longer usable after this call.

+ func (*Producer) CommitTransaction

+ func (p *Producer) CommitTransaction(ctx context.Context) error

+ CommitTransaction commits the current transaction.

+ Any outstanding messages will be flushed (delivered) before actually committing the transaction.

+ If any of the outstanding messages fail permanently the current transaction will enter the abortable error state and this function will return an abortable error; in this case the application must call `AbortTransaction()` before attempting a new transaction with `BeginTransaction()`.

+ Parameters:

+   • `ctx` - The maximum amount of time to block, or nil for indefinite.

+ Note: This function will block until all outstanding messages are delivered and the transaction commit request has been successfully handled by the transaction coordinator, or until the `ctx` expires, whichever comes first. On timeout the application may call the function again.

+ Note: Will automatically call `Flush()` to ensure all queued messages are delivered before attempting to commit the transaction. The application MUST serve the `producer.Events()` channel for delivery reports in a separate go-routine during this time.

+ Returns nil on success or an error object on failure. Check whether the returned error object permits retrying by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable or fatal error has been raised by calling `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()` respectively.

+ func (*Producer) Events

+ func (p *Producer) Events() chan Event

+ Events returns the Events channel (read)

+ func (*Producer) Flush

+ func (p *Producer) Flush(timeoutMs int) int

+ Flush and wait for outstanding messages and requests to complete delivery. Runs until the outstanding count reaches zero or timeoutMs elapses. Returns the number of outstanding events still un-flushed. BUG: Tries to include messages on ProduceChannel, but it's not guaranteed to be reliable.
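+ A common shutdown sketch: loop until Flush reports nothing outstanding, then close (the timeout and retry count are arbitrary choices of ours):

+    // Flush in 15-second slices until the queue drains or we give up.
+    for tries := 0; tries < 4; tries++ {
+        if remaining := p.Flush(15 * 1000); remaining == 0 {
+            break
+        }
+    }
+    p.Close()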

+ func (*Producer) GetFatalError

+ func (p *Producer) GetFatalError() error

+ GetFatalError returns an Error object if the client instance has raised a fatal error, else nil.

+ func (*Producer) GetMetadata

+ func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error)

+ GetMetadata queries broker for cluster and topic metadata. If topic is non-nil only information about that topic is returned, else if allTopics is false only information about locally used topics is returned, else information about all topics is returned. GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API.

+ func (*Producer) InitTransactions

+ func (p *Producer) InitTransactions(ctx context.Context) error

+ InitTransactions initializes transactions for the producer instance.

+ This function ensures any transactions initiated by previous instances of the producer with the same `transactional.id` are completed. If the previous instance failed with a transaction in progress the previous transaction will be aborted. This function needs to be called before any other transactional or produce functions are called when the `transactional.id` is configured.

+ If the last transaction had begun completion (following transaction commit) but not yet finished, this function will await the previous transaction's completion.

+ When any previous transactions have been fenced this function will acquire the internal producer id and epoch, used in all future transactional messages issued by this producer instance.

+ Parameters:

+   • `ctx` - The maximum time to block, or nil for indefinite. On timeout the operation may continue in the background, depending on state, and it is okay to call `InitTransactions()` again. Providing a nil context or a context without a deadline uses the timeout 2*transaction.timeout.ms.

+ Returns nil on success or an error on failure. Check whether the returned error object permits retrying by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal error has been raised by calling `err.(kafka.Error).IsFatal()`.

+ func (*Producer) IsClosed

+ func (p *Producer) IsClosed() bool

+ IsClosed returns a boolean representing whether the client is closed

+ func (*Producer) Len

+ func (p *Producer) Len() int

+ Len returns the number of messages and requests waiting to be transmitted to the broker as well as delivery reports queued for the application. BUG: Tries to include messages on ProduceChannel, but it's not guaranteed to be reliable.

+ func (*Producer) Logs

+ func (p *Producer) Logs() chan LogEvent

+ Logs returns the Log channel (if enabled), else nil
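+ A small sketch of consuming the log channel (requires `go.logs.channel.enable: true` at construction, as listed under NewProducer above):

+    // assumes: import "fmt" and a *kafka.Producer p with go.logs.channel.enable set
+    go func() {
+        for logEvent := range p.Logs() {
+            // Level is syslog-style: lower is more critical.
+            fmt.Printf("%s [%s] %s\n", logEvent.Name, logEvent.Tag, logEvent.Message)
+        }
+    }()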

+ func (*Producer) OffsetsForTimes

+ func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error)

+ OffsetsForTimes looks up offsets by timestamp for the given partitions.

+ The returned offset for each partition is the earliest offset whose timestamp is greater than or equal to the given timestamp in the corresponding partition. If the provided timestamp exceeds that of the last message in the partition, a value of -1 will be returned.

+ The timestamps to query are represented as `.Offset` in the `times` argument and the looked up offsets are represented as `.Offset` in the returned `offsets` list.

+ The function will block for at most timeoutMs milliseconds.

+ Duplicate Topic+Partitions are not supported. Per-partition errors may be returned in the `.Error` field.
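+ A hedged lookup sketch: the timestamp to query is passed in via the `.Offset` field, as described above (topic, partition and timestamp are placeholders of ours):

+    // assumes: import "fmt", "time" and "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+    topic := "metrics" // placeholder
+    ts := time.Now().Add(-1 * time.Hour).UnixMilli()
+    times := []kafka.TopicPartition{
+        {Topic: &topic, Partition: 0, Offset: kafka.Offset(ts)},
+    }
+    offsets, err := p.OffsetsForTimes(times, 5000)
+    if err == nil && len(offsets) == 1 {
+        // offsets[0].Offset is the earliest offset at or after ts, or -1.
+        fmt.Println(offsets[0].Offset)
+    }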

+ func (*Producer) Produce

+ func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error

+ Produce single message. This is an asynchronous call that enqueues the message on the internal transmit queue, thus returning immediately. The delivery report will be sent on the provided deliveryChan if specified, or on the Producer object's Events() channel if not.

+ msg.Timestamp requires librdkafka >= 0.9.4 (else returns ErrNotImplemented), api.version.request=true, and broker >= 0.10.0.0. msg.Headers requires librdkafka >= 0.11.4 (else returns ErrNotImplemented), api.version.request=true, and broker >= 0.11.0.0.

+ Returns an error if message could not be enqueued.

+ func (*Producer) ProduceChannel

+ func (p *Producer) ProduceChannel() chan *Message

+ ProduceChannel returns the produce *Message channel (write)

+ Deprecated: ProduceChannel (channel based producer) is deprecated in favour of Produce(). Flush() and Len() are not guaranteed to be reliable with ProduceChannel.

+ func (*Producer) Purge

+ func (p *Producer) Purge(flags int) error

+ Purge messages currently handled by this producer instance.

+ flags is a combination of PurgeQueue, PurgeInFlight and PurgeNonBlocking.

+ The application will need to call Poll(), Flush() or read the Events() channel after this call to serve delivery reports for the purged messages.

+ Messages purged from internal queues fail with the delivery report error code set to ErrPurgeQueue, while purged messages that are in-flight to or from the broker will fail with the error code set to ErrPurgeInflight.

+ Warning: Purging messages that are in-flight to or from the broker will ignore any subsequent acknowledgement for these messages received from the broker, effectively making it impossible for the application to know if the messages were successfully produced or not. This may result in duplicate messages if the application retries these messages at a later time.

+ Note: This call may block for a short time while background thread queues are purged.

+ Returns nil on success, ErrInvalidArg if the purge flags are invalid or unknown.
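+ A short sketch combining the purge flags and then serving the resulting delivery reports, as required above (timeout value is arbitrary):

+    if err := p.Purge(kafka.PurgeQueue | kafka.PurgeInFlight); err != nil {
+        // ErrInvalidArg means the flag combination was invalid.
+        panic(err)
+    }
+    // Serve the delivery reports for the purged messages; each report's
+    // error code will be ErrPurgeQueue or ErrPurgeInflight.
+    p.Flush(5 * 1000)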

+ func (*Producer) QueryWatermarkOffsets

+ func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)

+ QueryWatermarkOffsets returns the broker's low and high offsets for the given topic and partition.

+ func (*Producer) SendOffsetsToTransaction

+ func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error

+ SendOffsetsToTransaction sends a list of topic partition offsets to the consumer group coordinator for `consumerMetadata`, and marks the offsets as part of the current transaction. These offsets will be considered committed only if the transaction is committed successfully.

+ The offsets should be the next message your application will consume, i.e., the last processed message's offset + 1 for each partition. Either track the offsets manually during processing or use `consumer.Position()` (on the consumer) to get the current offsets for the partitions assigned to the consumer.

+ Use this method at the end of a consume-transform-produce loop prior to committing the transaction with `CommitTransaction()`.

+ Parameters:

+   • `ctx` - The maximum amount of time to block, or nil for indefinite.
+   • `offsets` - List of offsets to commit to the consumer group upon successful commit of the transaction. Offsets should be the next message to consume, e.g., last processed message + 1.
+   • `consumerMetadata` - The current consumer group metadata as returned by `consumer.GetConsumerGroupMetadata()` on the consumer instance the provided offsets were consumed from.

+ Note: The consumer must disable auto commits (set `enable.auto.commit` to false on the consumer).

+ Note: Logical and invalid offsets (e.g., OffsetInvalid) in `offsets` will be ignored. If there are no valid offsets in `offsets` the function will return nil and no action will be taken.

+ Returns nil on success or an error object on failure. Check whether the returned error object permits retrying by calling `err.(kafka.Error).IsRetriable()`, or whether an abortable or fatal error has been raised by calling `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()` respectively.
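+ A hedged consume-transform-produce fragment showing where this call sits (a consumer c and transactional producer p are assumed to exist; assignedPartitions is a hypothetical slice of the consumer's current assignment, and error handling is abbreviated):

+    // assumes: import "context" and "github.com/confluentinc/confluent-kafka-go/v2/kafka"
+    // The consumer must run with "enable.auto.commit": false.
+    position, _ := c.Position(assignedPartitions) // next offsets to consume
+    metadata, _ := c.GetConsumerGroupMetadata()
+    if err := p.SendOffsetsToTransaction(context.Background(), position, metadata); err == nil {
+        _ = p.CommitTransaction(context.Background())
+    } else if ke, ok := err.(kafka.Error); ok && ke.TxnRequiresAbort() {
+        _ = p.AbortTransaction(context.Background())
+    }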

+ func (*Producer) SetOAuthBearerToken

+ func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error

+ SetOAuthBearerToken sets the data to be transmitted to a broker during SASL/OAUTHBEARER authentication. It will return nil on success, otherwise an error if: 1) the token data is invalid (meaning an expiration time in the past or either a token value or an extension key or value that does not meet the regular expression requirements as per https://tools.ietf.org/html/rfc7628#section-3.1); 2) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 3) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism.

+ func (*Producer) SetOAuthBearerTokenFailure

+ func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error

+ SetOAuthBearerTokenFailure sets the error message describing why token retrieval/setting failed; it also schedules a new token refresh event for 10 seconds later so the attempt may be retried. It will return nil on success, otherwise an error if: 1) SASL/OAUTHBEARER is not supported by the underlying librdkafka build; 2) SASL/OAUTHBEARER is supported but is not configured as the client's authentication mechanism.

+ func (*Producer) SetSaslCredentials

+ func (p *Producer) SetSaslCredentials(username, password string) error

+ SetSaslCredentials sets the SASL credentials used for this producer. The new credentials will overwrite the old ones (which were set when creating the producer or by a previous call to SetSaslCredentials). The new credentials will be used the next time this producer needs to authenticate to a broker. This method will not disconnect existing broker connections that were established with the old credentials. This method applies only to the SASL PLAIN and SCRAM mechanisms.

+ func (*Producer) String

+ func (p *Producer) String() string

+ String returns a human readable name for a Producer instance

+ func (*Producer) TestFatalError

+ func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode

+ TestFatalError triggers a fatal error in the underlying client. This is to be used strictly for testing purposes.

+ type RebalanceCb

+ RebalanceCb provides a per-Subscribe*() rebalance event callback. The passed Event will be either AssignedPartitions or RevokedPartitions

+ type RebalanceCb func(*Consumer, Event) error

+ type ResourcePatternType

+ ResourcePatternType enumerates the different types of Kafka resource patterns.

type ResourcePatternType int
+
const (
+    // ResourcePatternTypeUnknown is a resource pattern type not known or not set.
+    ResourcePatternTypeUnknown ResourcePatternType = C.RD_KAFKA_RESOURCE_PATTERN_UNKNOWN
+    // ResourcePatternTypeAny matches any resource, used for lookups.
+    ResourcePatternTypeAny ResourcePatternType = C.RD_KAFKA_RESOURCE_PATTERN_ANY
+    // ResourcePatternTypeMatch will perform pattern matching
+    ResourcePatternTypeMatch ResourcePatternType = C.RD_KAFKA_RESOURCE_PATTERN_MATCH
+    // ResourcePatternTypeLiteral matches a literal resource name
+    ResourcePatternTypeLiteral ResourcePatternType = C.RD_KAFKA_RESOURCE_PATTERN_LITERAL
+    // ResourcePatternTypePrefixed matches a prefixed resource name
+    ResourcePatternTypePrefixed ResourcePatternType = C.RD_KAFKA_RESOURCE_PATTERN_PREFIXED
+)
+

+ func ResourcePatternTypeFromString

+ func ResourcePatternTypeFromString(patternTypeString string) (ResourcePatternType, error)

+ ResourcePatternTypeFromString translates a resource pattern type name to a ResourcePatternType value.

+ func (ResourcePatternType) String

+ func (t ResourcePatternType) String() string

+ String returns the human-readable representation of a ResourcePatternType

+ type ResourceType

+ ResourceType represents an Apache Kafka resource type

type ResourceType int
+
const (
+    // ResourceUnknown - Unknown
+    ResourceUnknown ResourceType = C.RD_KAFKA_RESOURCE_UNKNOWN
+    // ResourceAny - match any resource type (DescribeConfigs)
+    ResourceAny ResourceType = C.RD_KAFKA_RESOURCE_ANY
+    // ResourceTopic - Topic
+    ResourceTopic ResourceType = C.RD_KAFKA_RESOURCE_TOPIC
+    // ResourceGroup - Group
+    ResourceGroup ResourceType = C.RD_KAFKA_RESOURCE_GROUP
+    // ResourceBroker - Broker
+    ResourceBroker ResourceType = C.RD_KAFKA_RESOURCE_BROKER
+)
+

func ResourceTypeFromString

func ResourceTypeFromString(typeString string) (ResourceType, error)

ResourceTypeFromString translates a resource type name/string to
a ResourceType value.

func (ResourceType) String

func (t ResourceType) String() string

String returns the human-readable representation of a ResourceType.

type RevokedPartitions

RevokedPartitions consumer group rebalance event: revoked partition set.

type RevokedPartitions struct {
+    Partitions []TopicPartition
+}
+
+

func (RevokedPartitions) String

+
func (e RevokedPartitions) String() string
+

type ScramCredentialInfo

ScramCredentialInfo contains Mechanism and Iterations for a
SASL/SCRAM credential associated with a user.

type ScramCredentialInfo struct {
+    // Iterations - positive number of iterations used when creating the credential
+    Iterations int
+    // Mechanism - SASL/SCRAM mechanism
+    Mechanism ScramMechanism
+}
+
+

type ScramMechanism

ScramMechanism enumerates SASL/SCRAM mechanisms.
Used by `AdminClient.AlterUserScramCredentials`
and `AdminClient.DescribeUserScramCredentials`.

type ScramMechanism int
+
const (
+    // ScramMechanismUnknown - Unknown SASL/SCRAM mechanism
+    ScramMechanismUnknown ScramMechanism = C.RD_KAFKA_SCRAM_MECHANISM_UNKNOWN
+    // ScramMechanismSHA256 - SCRAM-SHA-256 mechanism
+    ScramMechanismSHA256 ScramMechanism = C.RD_KAFKA_SCRAM_MECHANISM_SHA_256
+    // ScramMechanismSHA512 - SCRAM-SHA-512 mechanism
+    ScramMechanismSHA512 ScramMechanism = C.RD_KAFKA_SCRAM_MECHANISM_SHA_512
+)
+

func ScramMechanismFromString

func ScramMechanismFromString(mechanism string) (ScramMechanism, error)

ScramMechanismFromString translates a SCRAM mechanism name to
a ScramMechanism value.
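
For example (a small sketch; the accepted spelling is assumed to match the
SASL mechanism names, e.g. "SCRAM-SHA-256"):

    // import "github.com/confluentinc/confluent-kafka-go/v2/kafka"
    mech, err := kafka.ScramMechanismFromString("SCRAM-SHA-256")
    if err == nil && mech == kafka.ScramMechanismSHA256 {
        // mech can be used in a ScramCredentialInfo, see below.
    }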

func (ScramMechanism) String

func (o ScramMechanism) String() string

String returns the human-readable representation of a ScramMechanism.

type Stats

Stats statistics event.

type Stats struct {
+    // contains filtered or unexported fields
+}
+
+

func (Stats) String

+
func (e Stats) String() string
+

type TimestampType

TimestampType is the Message timestamp type or source.

type TimestampType int
+
const (
+    // TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
+    TimestampNotAvailable TimestampType = C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE
+    // TimestampCreateTime indicates timestamp set by producer (source time)
+    TimestampCreateTime TimestampType = C.RD_KAFKA_TIMESTAMP_CREATE_TIME
+    // TimestampLogAppendTime indicates timestamp set by broker (store time)
+    TimestampLogAppendTime TimestampType = C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME
+)
+
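
A small sketch of inspecting the timestamp source on a consumed message
(msg is assumed to come from an earlier ReadMessage or Poll call):

    // import (
    //     "fmt"
    //     "github.com/confluentinc/confluent-kafka-go/v2/kafka"
    // )
    if msg.TimestampType == kafka.TimestampCreateTime {
        fmt.Println("producer-assigned timestamp:", msg.Timestamp)
    }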

func (TimestampType) String

+
func (t TimestampType) String() string
+

type TopicCollection

TopicCollection represents a collection of topics.

type TopicCollection struct {
+    // contains filtered or unexported fields
+}
+
+

func NewTopicCollectionOfTopicNames

func NewTopicCollectionOfTopicNames(names []string) TopicCollection

NewTopicCollectionOfTopicNames creates a new TopicCollection based on a list
of topic names.
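
A sketch of the typical use, feeding the collection to the admin client's
DescribeTopics call (admin client construction is elided and assumed):

    // import (
    //     "context"
    //     "fmt"
    //     "github.com/confluentinc/confluent-kafka-go/v2/kafka"
    // )
    // adminClient is an existing *kafka.AdminClient.
    topics := kafka.NewTopicCollectionOfTopicNames([]string{"metrics"})
    res, err := adminClient.DescribeTopics(context.Background(), topics)
    if err == nil {
        for _, d := range res.TopicDescriptions {
            fmt.Println(d.Name, d.TopicID, d.IsInternal)
        }
    }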

type TopicDescription

TopicDescription represents the result of DescribeTopics for
a single topic.

type TopicDescription struct {
+    // Topic name.
+    Name string
+    // Topic Id
+    TopicID UUID
+    // Error, if any, of the result. Check with `Error.Code() != ErrNoError`.
+    Error Error
+    // Is the topic internal to Kafka?
+    IsInternal bool
+    // Partitions' information list.
+    Partitions []TopicPartitionInfo
+    // Operations allowed for the topic (nil if not available or not requested).
+    AuthorizedOperations []ACLOperation
+}
+
+

type TopicMetadata

TopicMetadata contains per-topic metadata.

type TopicMetadata struct {
+    Topic      string
+    Partitions []PartitionMetadata
+    Error      Error
+}
+
+

type TopicPartition

TopicPartition is a generic placeholder for a Topic+Partition and optionally
Offset.

type TopicPartition struct {
+    Topic       *string
+    Partition   int32
+    Offset      Offset
+    Metadata    *string
+    Error       error
+    LeaderEpoch *int32 // LeaderEpoch or nil if not available
+}
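
For example, a sketch of constructing one by hand (Topic is a pointer, so a
named variable is needed in a literal):

    // import "github.com/confluentinc/confluent-kafka-go/v2/kafka"
    topic := "metrics"
    tp := kafka.TopicPartition{
        Topic:     &topic,
        Partition: 0,
        Offset:    kafka.OffsetBeginning, // logical start-of-partition offset
    }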
+
+

func (TopicPartition) String

+
func (p TopicPartition) String() string
+

type TopicPartitionInfo

TopicPartitionInfo represents a specific partition's information inside a
TopicDescription.

type TopicPartitionInfo struct {
+    // Partition id.
+    Partition int
+    // Leader broker.
+    Leader *Node
+    // Replicas of the partition.
+    Replicas []Node
+    // In-Sync-Replicas of the partition.
+    Isr []Node
+}
+
+

type TopicPartitions

TopicPartitions is a slice of TopicPartition that also implements
the sort interface.

type TopicPartitions []TopicPartition
+

func (TopicPartitions) Len

+
func (tps TopicPartitions) Len() int
+

func (TopicPartitions) Less

+
func (tps TopicPartitions) Less(i, j int) bool
+

func (TopicPartitions) Swap

+
func (tps TopicPartitions) Swap(i, j int)
+
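
Because TopicPartitions satisfies sort.Interface, an assignment can be sorted
directly (a small sketch):

    // import (
    //     "sort"
    //     "github.com/confluentinc/confluent-kafka-go/v2/kafka"
    // )
    // consumer is an existing *kafka.Consumer.
    parts, _ := consumer.Assignment() // []kafka.TopicPartition
    sort.Sort(kafka.TopicPartitions(parts))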

type TopicResult

TopicResult provides per-topic operation result (error) information.

type TopicResult struct {
+    // Topic name
+    Topic string
+    // Error, if any, of result. Check with `Error.Code() != ErrNoError`.
+    Error Error
+}
+
+

func (TopicResult) String

func (t TopicResult) String() string

String returns a human-readable representation of a TopicResult.

type TopicSpecification

TopicSpecification holds parameters for creating a new topic.
TopicSpecification is analogous to NewTopic in the Java Topic Admin API.

type TopicSpecification struct {
+    // Topic name to create.
+    Topic string
+    // Number of partitions in topic.
+    NumPartitions int
+    // Default replication factor for the topic's partitions, or zero
+    // if an explicit ReplicaAssignment is set.
+    ReplicationFactor int
+    // (Optional) Explicit replica assignment. The outer array is
+    // indexed by the partition number, while the inner per-partition array
+    // contains the replica broker ids. The first broker in each
+    // broker id list will be the preferred replica.
+    ReplicaAssignment [][]int32
+    // Topic configuration.
+    Config map[string]string
+}
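
A sketch of creating a topic with these parameters via the admin client
(client construction and full error handling are elided; the names are
illustrative):

    // import (
    //     "context"
    //     "fmt"
    //     "github.com/confluentinc/confluent-kafka-go/v2/kafka"
    // )
    // adminClient is an existing *kafka.AdminClient.
    results, err := adminClient.CreateTopics(context.Background(),
        []kafka.TopicSpecification{{
            Topic:             "metrics",
            NumPartitions:     3,
            ReplicationFactor: 1,
            Config:            map[string]string{"retention.ms": "86400000"},
        }})
    if err == nil {
        for _, r := range results { // each r is a TopicResult
            fmt.Println(r.Topic, r.Error)
        }
    }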
+
+

type UUID

UUID is the Kafka UUID representation.

type UUID struct {
+    // contains filtered or unexported fields
+}
+
+

func (UUID) GetLeastSignificantBits

func (uuid UUID) GetLeastSignificantBits() int64

GetLeastSignificantBits returns the least significant 64 bits of the 128-bit
UUID.

func (UUID) GetMostSignificantBits

func (uuid UUID) GetMostSignificantBits() int64

GetMostSignificantBits returns the most significant 64 bits of the 128-bit
UUID.

func (UUID) String

func (uuid UUID) String() string

String returns the base64 string representation of the UUID.
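
For example, reading the identifier off a TopicDescription (desc is assumed
to come from a DescribeTopics result, as sketched earlier):

    // import "fmt"
    id := desc.TopicID // kafka.UUID
    fmt.Println(id.String(),
        id.GetMostSignificantBits(), id.GetLeastSignificantBits())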

type UserScramCredentialDeletion

UserScramCredentialDeletion is a request to delete
a SASL/SCRAM credential for a user.

type UserScramCredentialDeletion struct {
+    // User - user name
+    User string
+    // Mechanism - SASL/SCRAM mechanism.
+    Mechanism ScramMechanism
+}
+
+

type UserScramCredentialUpsertion

UserScramCredentialUpsertion is a request to update/insert
a SASL/SCRAM credential for a user.

type UserScramCredentialUpsertion struct {
+    // User - user name
+    User string
+    // ScramCredentialInfo - the mechanism and iterations.
+    ScramCredentialInfo ScramCredentialInfo
+    // Password - password to HMAC before storage.
+    Password []byte
+    // Salt - salt to use. Will be generated randomly if nil. (optional)
+    Salt []byte
+}
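
A sketch of upserting a credential, assuming the AlterUserScramCredentials
admin call with separate upsertion and deletion slices (setup and error
handling are elided):

    // import (
    //     "context"
    //     "github.com/confluentinc/confluent-kafka-go/v2/kafka"
    // )
    // adminClient is an existing *kafka.AdminClient.
    upsert := kafka.UserScramCredentialUpsertion{
        User: "metrics-writer",
        ScramCredentialInfo: kafka.ScramCredentialInfo{
            Mechanism:  kafka.ScramMechanismSHA256,
            Iterations: 8192,
        },
        Password: []byte("s3cret"),
        Salt:     nil, // generated randomly when nil
    }
    _, err := adminClient.AlterUserScramCredentials(context.Background(),
        []kafka.UserScramCredentialUpsertion{upsert}, nil /* no deletions */)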
+
+

type UserScramCredentialsDescription

UserScramCredentialsDescription represents all SASL/SCRAM credentials
associated with a user that can be retrieved, or an error indicating
why credentials could not be retrieved.

type UserScramCredentialsDescription struct {
+    // User - the user name.
+    User string
+    // ScramCredentialInfos - SASL/SCRAM credential representations for the user.
+    ScramCredentialInfos []ScramCredentialInfo
+    // Error - error corresponding to this user description.
+    Error Error
+}
+
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_amd64.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_amd64.go similarity index 94% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_amd64.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_amd64.go index 96ba817b..1ca75c4c 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_amd64.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_amd64.go @@ -10,4 +10,4 @@ package kafka import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static darwin_amd64 from librdkafka-static-bundle-v1.9.2.tgz" +const LibrdkafkaLinkInfo = "static darwin_amd64 from librdkafka-static-bundle-v2.6.1.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_arm64.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_arm64.go similarity index 94% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_arm64.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_arm64.go index d4d35c92..eb568ce7 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_darwin_arm64.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_darwin_arm64.go @@ -10,4 +10,4 @@ package kafka import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static darwin_arm64 from librdkafka-static-bundle-v1.9.2.tgz" +const LibrdkafkaLinkInfo = "static darwin_arm64 from librdkafka-static-bundle-v2.6.1.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_dynamic.go similarity index 91% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_dynamic.go index bb7da9ba..92458af8 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_dynamic.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_dynamic.go @@ -1,3 +1,4 @@ +//go:build dynamic // +build dynamic package kafka diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_glibc_linux.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_amd64.go similarity index 70% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_glibc_linux.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_amd64.go index c30a1344..f5799a38 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_glibc_linux.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_amd64.go @@ -6,8 +6,8 @@ package kafka // #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB -// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux.a -lm -ldl -lpthread -lrt +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux_amd64.a -lm -ldl -lpthread -lrt -lpthread -lrt import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static glibc_linux from librdkafka-static-bundle-v1.9.2.tgz" +const LibrdkafkaLinkInfo = "static glibc_linux_amd64 from librdkafka-static-bundle-v2.6.1.tgz" diff
--git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_arm64.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_arm64.go new file mode 100644 index 00000000..33506208 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_glibc_linux_arm64.go @@ -0,0 +1,13 @@ +// +build !dynamic +// +build !musl + +// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT. + +package kafka + +// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_glibc_linux_arm64.a -lm -ldl -lpthread -lrt -lpthread -lrt +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "static glibc_linux_arm64 from librdkafka-static-bundle-v2.6.1.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_musl_linux.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_amd64.go similarity index 70% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_musl_linux.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_amd64.go index 44569e0e..995547a5 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_musl_linux.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_amd64.go @@ -6,8 +6,8 @@ package kafka // #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB -// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux.a -lm -ldl -lpthread -lrt -lpthread -lrt +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux_amd64.a -lm -ldl -lpthread -lrt -lpthread -lrt import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static musl_linux from librdkafka-static-bundle-v1.9.2.tgz" +const LibrdkafkaLinkInfo = "static musl_linux_amd64 from librdkafka-static-bundle-v2.6.1.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_arm64.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_arm64.go new file mode 100644 index 00000000..0d43aab8 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_musl_linux_arm64.go @@ -0,0 +1,13 @@ +// +build !dynamic +// +build musl + +// This file was auto-generated by librdkafka_vendor/bundle-import.sh, DO NOT EDIT. 
+ +package kafka + +// #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_musl_linux_arm64.a -lm -ldl -lpthread -lrt -lpthread -lrt +import "C" + +// LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client +const LibrdkafkaLinkInfo = "static musl_linux_arm64 from librdkafka-static-bundle-v2.6.1.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_windows.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_windows.go similarity index 95% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_windows.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_windows.go index 07278870..de71c4d6 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/build_windows.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/build_windows.go @@ -6,8 +6,8 @@ package kafka // #cgo CFLAGS: -DUSE_VENDORED_LIBRDKAFKA -DLIBRDKAFKA_STATICLIB -// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_windows.a -lws2_32 -lsecur32 -lcrypt32 +// #cgo LDFLAGS: ${SRCDIR}/librdkafka_vendor/librdkafka_windows.a -lws2_32 -lsecur32 -lcrypt32 import "C" // LibrdkafkaLinkInfo explains how librdkafka was linked to the Go client -const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v1.9.2.tgz" +const LibrdkafkaLinkInfo = "static windows from librdkafka-static-bundle-v2.6.1.tgz" diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/config.go similarity index 93% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/config.go index a6dfb8de..d9174b49 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/config.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/config.go @@ -18,6 +18,7 @@ package kafka import ( "fmt" + "go/types" "reflect" "strings" "unsafe" @@ -30,11 +31,12 @@ import ( import "C" // ConfigValue supports the following types: -// bool, int, string, any type with the standard String() interface +// +// bool, int, string, any type with the standard String() interface type ConfigValue interface{} // ConfigMap is a map containing standard librdkafka configuration properties as documented in: -// https://github.com/edenhill/librdkafka/tree/master/CONFIGURATION.md +// https://github.com/confluentinc/librdkafka/tree/master/CONFIGURATION.md // // The special property "default.topic.config" (optional) is a ConfigMap // containing default topic configuration properties. 
@@ -79,6 +81,7 @@ func (m ConfigMap) Set(kv string) error { func value2string(v ConfigValue) (ret string, errstr string) { + errstr = "" switch x := v.(type) { case bool: if x { @@ -90,13 +93,29 @@ func value2string(v ConfigValue) (ret string, errstr string) { ret = fmt.Sprintf("%d", x) case string: ret = x + case types.Slice: + ret = "" + arr := v.([]ConfigValue) + for _, i := range arr { + temp, err := value2string(i) + if err != "" { + ret = "" + errstr = fmt.Sprintf("Invalid value type %T", v) + break + } + ret += temp + "," + } + if len(ret) != 0 { + ret = ret[:len(ret)-1] + } case fmt.Stringer: ret = x.String() default: - return "", fmt.Sprintf("Invalid value type %T", v) + ret = "" + errstr = fmt.Sprintf("Invalid value type %T", v) } - return ret, "" + return ret, errstr } // rdkAnyconf abstracts rd_kafka_conf_t and rd_kafka_topic_conf_t diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/consumer.go similarity index 82% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/consumer.go index 2e1c9da6..efc54d00 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/consumer.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/consumer.go @@ -19,6 +19,7 @@ package kafka import ( "fmt" "math" + "sync/atomic" "time" "unsafe" ) @@ -47,6 +48,21 @@ type Consumer struct { rebalanceCb RebalanceCb appReassigned bool appRebalanceEnable bool // SerializerConfig setting + + isClosed uint32 + isClosing uint32 +} + +// IsClosed returns boolean representing if client is closed or not +func (c *Consumer) IsClosed() bool { + return atomic.LoadUint32(&c.isClosed) == 1 +} + +func (c *Consumer) verifyClient() error { + if c.IsClosed() { + return getOperationNotAllowedErrorForClosedClient() + } + return nil } // Strings returns a human readable name for a Consumer instance @@ -62,12 +78,20 @@ func (c *Consumer) gethandle() *handle { // Subscribe to a single topic // This replaces the current subscription func (c *Consumer) Subscribe(topic string, rebalanceCb RebalanceCb) error { + err := c.verifyClient() + if err != nil { + return err + } return c.SubscribeTopics([]string{topic}, rebalanceCb) } // SubscribeTopics subscribes to the provided list of topics. // This replaces the current subscription. func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (err error) { + err = c.verifyClient() + if err != nil { + return err + } ctopics := C.rd_kafka_topic_partition_list_new(C.int(len(topics))) defer C.rd_kafka_topic_partition_list_destroy(ctopics) @@ -89,6 +113,10 @@ func (c *Consumer) SubscribeTopics(topics []string, rebalanceCb RebalanceCb) (er // Unsubscribe from the current subscription, if any. func (c *Consumer) Unsubscribe() (err error) { + err = c.verifyClient() + if err != nil { + return err + } C.rd_kafka_unsubscribe(c.handle.rk) return nil } @@ -103,6 +131,10 @@ func (c *Consumer) Unsubscribe() (err error) { // // This replaces the current assignment. func (c *Consumer) Assign(partitions []TopicPartition) (err error) { + err = c.verifyClient() + if err != nil { + return err + } c.appReassigned = true cparts := newCPartsFromTopicPartitions(partitions) @@ -118,6 +150,10 @@ func (c *Consumer) Assign(partitions []TopicPartition) (err error) { // Unassign the current set of partitions to consume. 
func (c *Consumer) Unassign() (err error) { + err = c.verifyClient() + if err != nil { + return err + } c.appReassigned = true e := C.rd_kafka_assign(c.handle.rk, nil) @@ -139,6 +175,10 @@ func (c *Consumer) Unassign() (err error) { // // The new partitions must not be part of the current assignment. func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error) { + err = c.verifyClient() + if err != nil { + return err + } c.appReassigned = true cparts := newCPartsFromTopicPartitions(partitions) @@ -159,6 +199,10 @@ func (c *Consumer) IncrementalAssign(partitions []TopicPartition) (err error) { // // The removed partitions must be part of the current assignment. func (c *Consumer) IncrementalUnassign(partitions []TopicPartition) (err error) { + err = c.verifyClient() + if err != nil { + return err + } c.appReassigned = true cparts := newCPartsFromTopicPartitions(partitions) @@ -247,6 +291,10 @@ func (c *Consumer) commit(offsets []TopicPartition) (committedOffsets []TopicPar // This is a blocking call. // Returns the committed offsets on success. func (c *Consumer) Commit() ([]TopicPartition, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } return c.commit(nil) } @@ -254,6 +302,10 @@ func (c *Consumer) Commit() ([]TopicPartition, error) { // This is a blocking call. // Returns the committed offsets on success. func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } if m.TopicPartition.Error != nil { return nil, newErrorFromString(ErrInvalidArg, "Can't commit errored message") } @@ -266,6 +318,10 @@ func (c *Consumer) CommitMessage(m *Message) ([]TopicPartition, error) { // This is a blocking call. // Returns the committed offsets on success. func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } return c.commit(offsets) } @@ -277,6 +333,10 @@ func (c *Consumer) CommitOffsets(offsets []TopicPartition) ([]TopicPartition, er // an error and a list of offsets is returned. Each offset can be checked for // specific errors via its `.Error` member. func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } coffsets := newCPartsFromTopicPartitions(offsets) defer C.rd_kafka_topic_partition_list_destroy(coffsets) @@ -295,12 +355,21 @@ func (c *Consumer) StoreOffsets(offsets []TopicPartition) (storedOffsets []Topic // StoreMessage stores offset based on the provided message. // This is a convenience method that uses StoreOffsets to do the actual work. 
func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } if m.TopicPartition.Error != nil { return nil, newErrorFromString(ErrInvalidArg, "Can't store errored message") } if m.TopicPartition.Offset < 0 { return nil, newErrorFromString(ErrInvalidArg, "Can't store message with offset less than 0") } + + if m.TopicPartition.LeaderEpoch != nil && *m.TopicPartition.LeaderEpoch < 0 { + return nil, newErrorFromString(ErrInvalidArg, "Can't store message with leader epoch less than 0") + } + offsets := []TopicPartition{m.TopicPartition} offsets[0].Offset++ return c.StoreOffsets(offsets) @@ -308,11 +377,12 @@ func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err // Seek seeks the given topic partitions using the offset from the TopicPartition. // -// If timeoutMs is not 0 the call will wait this long for the -// seek to be performed. If the timeout is reached the internal state -// will be unknown and this function returns ErrTimedOut. -// If timeoutMs is 0 it will initiate the seek but return -// immediately without any error reporting (e.g., async). +// The ignoredTimeoutMs parameter is ignored. Instead, this method blocks until +// the fetcher state is updated for the given partition with the new offset. +// This guarantees that no previously fetched messages for the old offset (or +// fetch position) will be passed to the application once this call returns. +// It will still take some time after the method returns until messages are +// fetched at the new offset. // // Seek() may only be used for partitions already being consumed // (through Assign() or implicitly through a self-rebalanced Subscribe()). @@ -320,24 +390,61 @@ func (c *Consumer) StoreMessage(m *Message) (storedOffsets []TopicPartition, err // a starting offset for each partition. // // Returns an error on failure or nil otherwise. -func (c *Consumer) Seek(partition TopicPartition, timeoutMs int) error { +// Deprecated: Seek is deprecated in favour of SeekPartitions(). +func (c *Consumer) Seek(partition TopicPartition, ignoredTimeoutMs int) error { + err := c.verifyClient() + if err != nil { + return err + } rkt := c.handle.getRkt(*partition.Topic) cErr := C.rd_kafka_seek(rkt, C.int32_t(partition.Partition), C.int64_t(partition.Offset), - C.int(timeoutMs)) + C.int(-1)) if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { return newError(cErr) } return nil } +// SeekPartitions seeks the given topic partitions to the per-partition offset +// stored in the .Offset field of each partition. +// +// The offset may be either absolute (>= 0) or a logical offset (e.g. OffsetEnd). +// +// SeekPartitions() may only be used for partitions already being consumed +// (through Assign() or implicitly through a self-rebalanced Subscribe()). +// To set the starting offset it is preferred to use Assign() in a +// kafka.AssignedPartitions handler and provide a starting offset for each +// partition. +// +// Returns an error on failure or nil otherwise. Individual partition errors +// should be checked in the per-partition .Error field. 
+func (c *Consumer) SeekPartitions(partitions []TopicPartition) ([]TopicPartition, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } + + cPartitions := newCPartsFromTopicPartitions(partitions) + defer C.rd_kafka_topic_partition_list_destroy(cPartitions) + + cErr := C.rd_kafka_seek_partitions( + c.handle.rk, cPartitions, -1 /* infinite timeout */) + if cErr != nil { + return nil, newErrorFromCErrorDestroy(cErr) + } + + return newTopicPartitionsFromCparts(cPartitions), nil +} + // Poll the consumer for messages or events. // -// Will block for at most timeoutMs milliseconds +// # Will block for at most timeoutMs milliseconds // // The following callbacks may be triggered: -// Subscribe()'s rebalanceCb +// +// Subscribe()'s rebalanceCb // // Returns nil on timeout, else an Event func (c *Consumer) Poll(timeoutMs int) (event Event) { @@ -346,6 +453,9 @@ func (c *Consumer) Poll(timeoutMs int) (event Event) { } // Events returns the Events channel (if enabled) +// +// Deprecated: Events (channel based consumer) is deprecated in favour +// of Poll(). func (c *Consumer) Events() chan Event { return c.events } @@ -364,7 +474,7 @@ func (c *Consumer) Logs() chan LogEvent { // a new message or error. `timeout` may be set to -1 for // indefinite wait. // -// Timeout is returned as (nil, err) where err is `err.(kafka.Error).Code() == kafka.ErrTimedOut`. +// Timeout is returned as (nil, err) where `err.(kafka.Error).IsTimeout() == true`. // // Messages are returned as (msg, nil), // while general errors are returned as (nil, err), @@ -372,8 +482,11 @@ func (c *Consumer) Logs() chan LogEvent { // msg.TopicPartition provides partition-specific information (such as topic, partition and offset). // // All other event types, such as PartitionEOF, AssignedPartitions, etc, are silently discarded. -// func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } var absTimeout time.Time var timeoutMs int @@ -416,6 +529,16 @@ func (c *Consumer) ReadMessage(timeout time.Duration) (*Message, error) { // Close Consumer instance. // The object is no longer usable after this call. func (c *Consumer) Close() (err error) { + // Check if the client is already closed. + err = c.verifyClient() + if err != nil { + return err + } + + // Client is in the process of closing. + if !atomic.CompareAndSwapUint32(&c.isClosing, 0, 1) { + return newErrorFromString(ErrState, "Consumer is already closing") + } // Wait for consumerReader() or pollLogEvents to terminate (by closing readerTermChan) close(c.readerTermChan) @@ -430,6 +553,9 @@ func (c *Consumer) Close() (err error) { c.Poll(100) } + // After this point, no more consumer methods may be called. + atomic.StoreUint32(&c.isClosed, 1) + // Destroy our queue C.rd_kafka_queue_destroy(c.handle.rkq) c.handle.rkq = nil @@ -446,14 +572,15 @@ func (c *Consumer) Close() (err error) { // conf is a *ConfigMap with standard librdkafka configuration properties. // // Supported special configuration properties: -// go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel. -// If set to true the app must handle the AssignedPartitions and -// RevokedPartitions events and call Assign() and Unassign() -// respectively. -// go.events.channel.enable (bool, false) - [deprecated] Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled. 
-// go.events.channel.size (int, 1000) - Events() channel size -// go.logs.channel.enable (bool, false) - Forward log to Logs() channel. -// go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true. +// +// go.application.rebalance.enable (bool, false) - Forward rebalancing responsibility to application via the Events() channel. +// If set to true the app must handle the AssignedPartitions and +// RevokedPartitions events and call Assign() and Unassign() +// respectively. +// go.events.channel.enable (bool, false) - [deprecated] Enable the Events() channel. Messages and events will be pushed on the Events() channel and the Poll() interface will be disabled. +// go.events.channel.size (int, 1000) - Events() channel size +// go.logs.channel.enable (bool, false) - Forward log to Logs() channel. +// go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true. // // WARNING: Due to the buffering nature of channels (and queues in general) the // use of the events channel risks receiving outdated events and @@ -481,6 +608,7 @@ func NewConsumer(conf *ConfigMap) (*Consumer, error) { } c := &Consumer{} + c.isClosed = 0 v, err := confCopy.extract("go.application.rebalance.enable", false) if err != nil { @@ -571,11 +699,19 @@ func consumerReader(c *Consumer, termChan chan bool) { // else information about all topics is returned. // GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. func (c *Consumer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } return getMetadata(c, topic, allTopics, timeoutMs) } // QueryWatermarkOffsets queries the broker for the low and high offsets for the given topic and partition. func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) { + err = c.verifyClient() + if err != nil { + return -1, -1, err + } return queryWatermarkOffsets(c, topic, partition, timeoutMs) } @@ -584,6 +720,10 @@ func (c *Consumer) QueryWatermarkOffsets(topic string, partition int32, timeoutM // The low offset is populated every statistics.interval.ms if that value is set. // OffsetInvalid will be returned if there is no cached offset for either value. func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high int64, err error) { + err = c.verifyClient() + if err != nil { + return -1, -1, err + } return getWatermarkOffsets(c, topic, partition) } @@ -603,11 +743,19 @@ func (c *Consumer) GetWatermarkOffsets(topic string, partition int32) (low, high // Duplicate Topic+Partitions are not supported. // Per-partition errors may be returned in the `.Error` field. 
func (c *Consumer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } return offsetsForTimes(c, times, timeoutMs) } // Subscription returns the current subscription as set by Subscribe() func (c *Consumer) Subscription() (topics []string, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } var cTopics *C.rd_kafka_topic_partition_list_t cErr := C.rd_kafka_subscription(c.handle.rk, &cTopics) @@ -629,6 +777,10 @@ func (c *Consumer) Subscription() (topics []string, err error) { // Assignment returns the current partition assignments func (c *Consumer) Assignment() (partitions []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } var cParts *C.rd_kafka_topic_partition_list_t cErr := C.rd_kafka_assignment(c.handle.rk, &cParts) @@ -644,6 +796,10 @@ func (c *Consumer) Assignment() (partitions []TopicPartition, err error) { // Committed retrieves committed offsets for the given set of partitions func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } cparts := newCPartsFromTopicPartitions(partitions) defer C.rd_kafka_topic_partition_list_destroy(cparts) cerr := C.rd_kafka_committed(c.handle.rk, cparts, C.int(timeoutMs)) @@ -661,6 +817,10 @@ func (c *Consumer) Committed(partitions []TopicPartition, timeoutMs int) (offset // The consume position is the next message to read from the partition. // i.e., the offset of the last message seen by the application + 1. func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartition, err error) { + err = c.verifyClient() + if err != nil { + return nil, err + } cparts := newCPartsFromTopicPartitions(partitions) defer C.rd_kafka_topic_partition_list_destroy(cparts) cerr := C.rd_kafka_position(c.handle.rk, cparts) @@ -677,6 +837,10 @@ func (c *Consumer) Position(partitions []TopicPartition) (offsets []TopicPartiti // (if `go.events.channel.enable` has been set) will NOT be purged by // this call, set `go.events.channel.size` accordingly. func (c *Consumer) Pause(partitions []TopicPartition) (err error) { + err = c.verifyClient() + if err != nil { + return err + } cparts := newCPartsFromTopicPartitions(partitions) defer C.rd_kafka_topic_partition_list_destroy(cparts) cerr := C.rd_kafka_pause_partitions(c.handle.rk, cparts) @@ -688,6 +852,10 @@ func (c *Consumer) Pause(partitions []TopicPartition) (err error) { // Resume consumption for the provided list of partitions func (c *Consumer) Resume(partitions []TopicPartition) (err error) { + err = c.verifyClient() + if err != nil { + return err + } cparts := newCPartsFromTopicPartitions(partitions) defer C.rd_kafka_topic_partition_list_destroy(cparts) cerr := C.rd_kafka_resume_partitions(c.handle.rk, cparts) @@ -708,6 +876,10 @@ func (c *Consumer) Resume(partitions []TopicPartition) (err error) { // 3) SASL/OAUTHBEARER is supported but is not configured as the client's // authentication mechanism. func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error { + err := c.verifyClient() + if err != nil { + return err + } return c.handle.setOAuthBearerToken(oauthBearerToken) } @@ -719,6 +891,10 @@ func (c *Consumer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error // 2) SASL/OAUTHBEARER is supported but is not configured as the client's // authentication mechanism. 
func (c *Consumer) SetOAuthBearerTokenFailure(errstr string) error { + err := c.verifyClient() + if err != nil { + return err + } return c.handle.setOAuthBearerTokenFailure(errstr) } @@ -764,6 +940,10 @@ func deserializeConsumerGroupMetadata(serialized []byte) (*C.rd_kafka_consumer_g // This object should be passed to the transactional producer's // SendOffsetsToTransaction() API. func (c *Consumer) GetConsumerGroupMetadata() (*ConsumerGroupMetadata, error) { + err := c.verifyClient() + if err != nil { + return nil, err + } cgmd := C.rd_kafka_consumer_group_metadata(c.handle.rk) if cgmd == nil { return nil, NewError(ErrState, "Consumer group metadata not available", false) @@ -907,3 +1087,17 @@ func (c *Consumer) handleRebalanceEvent(channel chan Event, rkev *C.rd_kafka_eve return nil } + +// SetSaslCredentials sets the SASL credentials used for this consumer. The new credentials +// will overwrite the old ones (which were set when creating the consumer or by a previous +// call to SetSaslCredentials). The new credentials will be used the next time the +// consumer needs to authenticate to a broker. This method will not disconnect +// existing broker connections that were established with the old credentials. +// This method applies only to the SASL PLAIN and SCRAM mechanisms. +func (c *Consumer) SetSaslCredentials(username, password string) error { + err := c.verifyClient() + if err != nil { + return err + } + return setSaslCredentials(c.handle.rk, username, password) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/context.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/context.go similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/context.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/context.go diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error.go similarity index 92% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error.go index e14c1ca0..06c94bb4 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error.go @@ -127,6 +127,12 @@ func (e Error) IsRetriable() bool { return e.retriable } +// IsTimeout returns true if the error is a timeout error. +// A timeout error indicates that the operation timed out locally. 
+func (e Error) IsTimeout() bool { + return e.code == ErrTimedOut || e.code == ErrTimedOutQueue +} + // TxnRequiresAbort returns true if the error is an abortable transaction error // that requires the application to abort the current transaction with // AbortTransaction() and start a new transaction with BeginTransaction() @@ -157,3 +163,7 @@ func getFatalError(H Handle) error { func testFatalError(H Handle, code ErrorCode, str string) ErrorCode { return ErrorCode(C.rd_kafka_test_fatal_error(H.gethandle().rk, C.rd_kafka_resp_err_t(code), C.CString(str))) } + +func getOperationNotAllowedErrorForClosedClient() error { + return newErrorFromString(ErrState, "Operation not allowed on closed client") +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error_gen.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error_gen.go similarity index 91% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/error_gen.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error_gen.go index ccfaba28..312444a0 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/error_gen.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/error_gen.go @@ -57,7 +57,7 @@ func camelCase(s string) string { // librdkafka error codes. // This function is not intended for public use. func WriteErrorCodes(f *os.File) { - f.WriteString("package kafka\n") + f.WriteString("package kafka\n\n") now := time.Now() f.WriteString(fmt.Sprintf("// Copyright 2016-%d Confluent Inc.\n", now.Year())) f.WriteString(fmt.Sprintf("// AUTOMATICALLY GENERATED ON %v USING librdkafka %s\n", @@ -78,7 +78,7 @@ type ErrorCode int // String returns a human readable representation of an error code func (c ErrorCode) String() string { - return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c))) + return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c))) } const ( @@ -102,8 +102,8 @@ const ( errname = strings.Replace(errname, "Eof", "EOF", -1) errname = strings.Replace(errname, "Id", "ID", -1) - f.WriteString(fmt.Sprintf(" // %s %s\n", errname, desc)) - f.WriteString(fmt.Sprintf(" %s ErrorCode = ErrorCode(C.RD_KAFKA_RESP_ERR_%s)\n", + f.WriteString(fmt.Sprintf("\t// %s %s\n", errname, desc)) + f.WriteString(fmt.Sprintf("\t%s ErrorCode = C.RD_KAFKA_RESP_ERR_%s\n", errname, orig)) } diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/event.go similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/event.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/event.go diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/generated_errors.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/generated_errors.go new file mode 100644 index 00000000..a82bdbeb --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/generated_errors.go @@ -0,0 +1,356 @@ +package kafka + +// Copyright 2016-2024 Confluent Inc. 
+// AUTOMATICALLY GENERATED ON 2024-11-18 18:54:20.85854949 +0100 CET m=+0.000130522 USING librdkafka 2.6.1 + +/* +#include "select_rdkafka.h" +*/ +import "C" + +// ErrorCode is the integer representation of local and broker error codes +type ErrorCode int + +// String returns a human readable representation of an error code +func (c ErrorCode) String() string { + return C.GoString(C.rd_kafka_err2str(C.rd_kafka_resp_err_t(c))) +} + +const ( + // ErrBadMsg Local: Bad message format + ErrBadMsg ErrorCode = C.RD_KAFKA_RESP_ERR__BAD_MSG + // ErrBadCompression Local: Invalid compressed data + ErrBadCompression ErrorCode = C.RD_KAFKA_RESP_ERR__BAD_COMPRESSION + // ErrDestroy Local: Broker handle destroyed + ErrDestroy ErrorCode = C.RD_KAFKA_RESP_ERR__DESTROY + // ErrFail Local: Communication failure with broker + ErrFail ErrorCode = C.RD_KAFKA_RESP_ERR__FAIL + // ErrTransport Local: Broker transport failure + ErrTransport ErrorCode = C.RD_KAFKA_RESP_ERR__TRANSPORT + // ErrCritSysResource Local: Critical system resource failure + ErrCritSysResource ErrorCode = C.RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE + // ErrResolve Local: Host resolution failure + ErrResolve ErrorCode = C.RD_KAFKA_RESP_ERR__RESOLVE + // ErrMsgTimedOut Local: Message timed out + ErrMsgTimedOut ErrorCode = C.RD_KAFKA_RESP_ERR__MSG_TIMED_OUT + // ErrPartitionEOF Broker: No more messages + ErrPartitionEOF ErrorCode = C.RD_KAFKA_RESP_ERR__PARTITION_EOF + // ErrUnknownPartition Local: Unknown partition + ErrUnknownPartition ErrorCode = C.RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION + // ErrFs Local: File or filesystem error + ErrFs ErrorCode = C.RD_KAFKA_RESP_ERR__FS + // ErrUnknownTopic Local: Unknown topic + ErrUnknownTopic ErrorCode = C.RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC + // ErrAllBrokersDown Local: All broker connections are down + ErrAllBrokersDown ErrorCode = C.RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN + // ErrInvalidArg Local: Invalid argument or configuration + ErrInvalidArg ErrorCode = C.RD_KAFKA_RESP_ERR__INVALID_ARG + // ErrTimedOut Local: Timed out + ErrTimedOut ErrorCode = C.RD_KAFKA_RESP_ERR__TIMED_OUT + // ErrQueueFull Local: Queue full + ErrQueueFull ErrorCode = C.RD_KAFKA_RESP_ERR__QUEUE_FULL + // ErrIsrInsuff Local: ISR count insufficient + ErrIsrInsuff ErrorCode = C.RD_KAFKA_RESP_ERR__ISR_INSUFF + // ErrNodeUpdate Local: Broker node update + ErrNodeUpdate ErrorCode = C.RD_KAFKA_RESP_ERR__NODE_UPDATE + // ErrSsl Local: SSL error + ErrSsl ErrorCode = C.RD_KAFKA_RESP_ERR__SSL + // ErrWaitCoord Local: Waiting for coordinator + ErrWaitCoord ErrorCode = C.RD_KAFKA_RESP_ERR__WAIT_COORD + // ErrUnknownGroup Local: Unknown group + ErrUnknownGroup ErrorCode = C.RD_KAFKA_RESP_ERR__UNKNOWN_GROUP + // ErrInProgress Local: Operation in progress + ErrInProgress ErrorCode = C.RD_KAFKA_RESP_ERR__IN_PROGRESS + // ErrPrevInProgress Local: Previous operation in progress + ErrPrevInProgress ErrorCode = C.RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS + // ErrExistingSubscription Local: Existing subscription + ErrExistingSubscription ErrorCode = C.RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION + // ErrAssignPartitions Local: Assign partitions + ErrAssignPartitions ErrorCode = C.RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS + // ErrRevokePartitions Local: Revoke partitions + ErrRevokePartitions ErrorCode = C.RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS + // ErrConflict Local: Conflicting use + ErrConflict ErrorCode = C.RD_KAFKA_RESP_ERR__CONFLICT + // ErrState Local: Erroneous state + ErrState ErrorCode = C.RD_KAFKA_RESP_ERR__STATE + // ErrUnknownProtocol Local: Unknown protocol + 
ErrUnknownProtocol ErrorCode = C.RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL + // ErrNotImplemented Local: Not implemented + ErrNotImplemented ErrorCode = C.RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED + // ErrAuthentication Local: Authentication failure + ErrAuthentication ErrorCode = C.RD_KAFKA_RESP_ERR__AUTHENTICATION + // ErrNoOffset Local: No offset stored + ErrNoOffset ErrorCode = C.RD_KAFKA_RESP_ERR__NO_OFFSET + // ErrOutdated Local: Outdated + ErrOutdated ErrorCode = C.RD_KAFKA_RESP_ERR__OUTDATED + // ErrTimedOutQueue Local: Timed out in queue + ErrTimedOutQueue ErrorCode = C.RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE + // ErrUnsupportedFeature Local: Required feature not supported by broker + ErrUnsupportedFeature ErrorCode = C.RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE + // ErrWaitCache Local: Awaiting cache update + ErrWaitCache ErrorCode = C.RD_KAFKA_RESP_ERR__WAIT_CACHE + // ErrIntr Local: Operation interrupted + ErrIntr ErrorCode = C.RD_KAFKA_RESP_ERR__INTR + // ErrKeySerialization Local: Key serialization error + ErrKeySerialization ErrorCode = C.RD_KAFKA_RESP_ERR__KEY_SERIALIZATION + // ErrValueSerialization Local: Value serialization error + ErrValueSerialization ErrorCode = C.RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION + // ErrKeyDeserialization Local: Key deserialization error + ErrKeyDeserialization ErrorCode = C.RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION + // ErrValueDeserialization Local: Value deserialization error + ErrValueDeserialization ErrorCode = C.RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION + // ErrPartial Local: Partial response + ErrPartial ErrorCode = C.RD_KAFKA_RESP_ERR__PARTIAL + // ErrReadOnly Local: Read-only object + ErrReadOnly ErrorCode = C.RD_KAFKA_RESP_ERR__READ_ONLY + // ErrNoent Local: No such entry + ErrNoent ErrorCode = C.RD_KAFKA_RESP_ERR__NOENT + // ErrUnderflow Local: Read underflow + ErrUnderflow ErrorCode = C.RD_KAFKA_RESP_ERR__UNDERFLOW + // ErrInvalidType Local: Invalid type + ErrInvalidType ErrorCode = C.RD_KAFKA_RESP_ERR__INVALID_TYPE + // ErrRetry Local: Retry operation + ErrRetry ErrorCode = C.RD_KAFKA_RESP_ERR__RETRY + // ErrPurgeQueue Local: Purged in queue + ErrPurgeQueue ErrorCode = C.RD_KAFKA_RESP_ERR__PURGE_QUEUE + // ErrPurgeInflight Local: Purged in flight + ErrPurgeInflight ErrorCode = C.RD_KAFKA_RESP_ERR__PURGE_INFLIGHT + // ErrFatal Local: Fatal error + ErrFatal ErrorCode = C.RD_KAFKA_RESP_ERR__FATAL + // ErrInconsistent Local: Inconsistent state + ErrInconsistent ErrorCode = C.RD_KAFKA_RESP_ERR__INCONSISTENT + // ErrGaplessGuarantee Local: Gap-less ordering would not be guaranteed if proceeding + ErrGaplessGuarantee ErrorCode = C.RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE + // ErrMaxPollExceeded Local: Maximum application poll interval (max.poll.interval.ms) exceeded + ErrMaxPollExceeded ErrorCode = C.RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED + // ErrUnknownBroker Local: Unknown broker + ErrUnknownBroker ErrorCode = C.RD_KAFKA_RESP_ERR__UNKNOWN_BROKER + // ErrNotConfigured Local: Functionality not configured + ErrNotConfigured ErrorCode = C.RD_KAFKA_RESP_ERR__NOT_CONFIGURED + // ErrFenced Local: This instance has been fenced by a newer instance + ErrFenced ErrorCode = C.RD_KAFKA_RESP_ERR__FENCED + // ErrApplication Local: Application generated error + ErrApplication ErrorCode = C.RD_KAFKA_RESP_ERR__APPLICATION + // ErrAssignmentLost Local: Group partition assignment lost + ErrAssignmentLost ErrorCode = C.RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST + // ErrNoop Local: No operation performed + ErrNoop ErrorCode = C.RD_KAFKA_RESP_ERR__NOOP + // ErrAutoOffsetReset Local: No offset to 
automatically reset to + ErrAutoOffsetReset ErrorCode = C.RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET + // ErrLogTruncation Local: Partition log truncation detected + ErrLogTruncation ErrorCode = C.RD_KAFKA_RESP_ERR__LOG_TRUNCATION + // ErrInvalidDifferentRecord Local: an invalid record in the same batch caused the failure of this message too. + ErrInvalidDifferentRecord ErrorCode = C.RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD + // ErrUnknown Unknown broker error + ErrUnknown ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN + // ErrNoError Success + ErrNoError ErrorCode = C.RD_KAFKA_RESP_ERR_NO_ERROR + // ErrOffsetOutOfRange Broker: Offset out of range + ErrOffsetOutOfRange ErrorCode = C.RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE + // ErrInvalidMsg Broker: Invalid message + ErrInvalidMsg ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_MSG + // ErrUnknownTopicOrPart Broker: Unknown topic or partition + ErrUnknownTopicOrPart ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART + // ErrInvalidMsgSize Broker: Invalid message size + ErrInvalidMsgSize ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE + // ErrLeaderNotAvailable Broker: Leader not available + ErrLeaderNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE + // ErrNotLeaderForPartition Broker: Not leader for partition + ErrNotLeaderForPartition ErrorCode = C.RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION + // ErrRequestTimedOut Broker: Request timed out + ErrRequestTimedOut ErrorCode = C.RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT + // ErrBrokerNotAvailable Broker: Broker not available + ErrBrokerNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE + // ErrReplicaNotAvailable Broker: Replica not available + ErrReplicaNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE + // ErrMsgSizeTooLarge Broker: Message size too large + ErrMsgSizeTooLarge ErrorCode = C.RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE + // ErrStaleCtrlEpoch Broker: StaleControllerEpochCode + ErrStaleCtrlEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH + // ErrOffsetMetadataTooLarge Broker: Offset metadata string too large + ErrOffsetMetadataTooLarge ErrorCode = C.RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE + // ErrNetworkException Broker: Broker disconnected before response received + ErrNetworkException ErrorCode = C.RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION + // ErrCoordinatorLoadInProgress Broker: Coordinator load in progress + ErrCoordinatorLoadInProgress ErrorCode = C.RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS + // ErrCoordinatorNotAvailable Broker: Coordinator not available + ErrCoordinatorNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE + // ErrNotCoordinator Broker: Not coordinator + ErrNotCoordinator ErrorCode = C.RD_KAFKA_RESP_ERR_NOT_COORDINATOR + // ErrTopicException Broker: Invalid topic + ErrTopicException ErrorCode = C.RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION + // ErrRecordListTooLarge Broker: Message batch larger than configured server segment size + ErrRecordListTooLarge ErrorCode = C.RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE + // ErrNotEnoughReplicas Broker: Not enough in-sync replicas + ErrNotEnoughReplicas ErrorCode = C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS + // ErrNotEnoughReplicasAfterAppend Broker: Message(s) written to insufficient number of in-sync replicas + ErrNotEnoughReplicasAfterAppend ErrorCode = C.RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND + // ErrInvalidRequiredAcks Broker: Invalid required acks value + ErrInvalidRequiredAcks ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS + // ErrIllegalGeneration Broker: 
Specified group generation id is not valid + ErrIllegalGeneration ErrorCode = C.RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION + // ErrInconsistentGroupProtocol Broker: Inconsistent group protocol + ErrInconsistentGroupProtocol ErrorCode = C.RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL + // ErrInvalidGroupID Broker: Invalid group.id + ErrInvalidGroupID ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_GROUP_ID + // ErrUnknownMemberID Broker: Unknown member + ErrUnknownMemberID ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID + // ErrInvalidSessionTimeout Broker: Invalid session timeout + ErrInvalidSessionTimeout ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT + // ErrRebalanceInProgress Broker: Group rebalance in progress + ErrRebalanceInProgress ErrorCode = C.RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS + // ErrInvalidCommitOffsetSize Broker: Commit offset data size is not valid + ErrInvalidCommitOffsetSize ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE + // ErrTopicAuthorizationFailed Broker: Topic authorization failed + ErrTopicAuthorizationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED + // ErrGroupAuthorizationFailed Broker: Group authorization failed + ErrGroupAuthorizationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED + // ErrClusterAuthorizationFailed Broker: Cluster authorization failed + ErrClusterAuthorizationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED + // ErrInvalidTimestamp Broker: Invalid timestamp + ErrInvalidTimestamp ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP + // ErrUnsupportedSaslMechanism Broker: Unsupported SASL mechanism + ErrUnsupportedSaslMechanism ErrorCode = C.RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM + // ErrIllegalSaslState Broker: Request not valid in current SASL state + ErrIllegalSaslState ErrorCode = C.RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE + // ErrUnsupportedVersion Broker: API version not supported + ErrUnsupportedVersion ErrorCode = C.RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION + // ErrTopicAlreadyExists Broker: Topic already exists + ErrTopicAlreadyExists ErrorCode = C.RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS + // ErrInvalidPartitions Broker: Invalid number of partitions + ErrInvalidPartitions ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_PARTITIONS + // ErrInvalidReplicationFactor Broker: Invalid replication factor + ErrInvalidReplicationFactor ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR + // ErrInvalidReplicaAssignment Broker: Invalid replica assignment + ErrInvalidReplicaAssignment ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT + // ErrInvalidConfig Broker: Configuration is invalid + ErrInvalidConfig ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_CONFIG + // ErrNotController Broker: Not controller for cluster + ErrNotController ErrorCode = C.RD_KAFKA_RESP_ERR_NOT_CONTROLLER + // ErrInvalidRequest Broker: Invalid request + ErrInvalidRequest ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_REQUEST + // ErrUnsupportedForMessageFormat Broker: Message format on broker does not support request + ErrUnsupportedForMessageFormat ErrorCode = C.RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT + // ErrPolicyViolation Broker: Policy violation + ErrPolicyViolation ErrorCode = C.RD_KAFKA_RESP_ERR_POLICY_VIOLATION + // ErrOutOfOrderSequenceNumber Broker: Broker received an out of order sequence number + ErrOutOfOrderSequenceNumber ErrorCode = C.RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER + // ErrDuplicateSequenceNumber Broker: Broker received a duplicate sequence number + ErrDuplicateSequenceNumber 
ErrorCode = C.RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER + // ErrInvalidProducerEpoch Broker: Producer attempted an operation with an old epoch + ErrInvalidProducerEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH + // ErrInvalidTxnState Broker: Producer attempted a transactional operation in an invalid state + ErrInvalidTxnState ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_TXN_STATE + // ErrInvalidProducerIDMapping Broker: Producer attempted to use a producer id which is not currently assigned to its transactional id + ErrInvalidProducerIDMapping ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING + // ErrInvalidTransactionTimeout Broker: Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms + ErrInvalidTransactionTimeout ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT + // ErrConcurrentTransactions Broker: Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing + ErrConcurrentTransactions ErrorCode = C.RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS + // ErrTransactionCoordinatorFenced Broker: Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer + ErrTransactionCoordinatorFenced ErrorCode = C.RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED + // ErrTransactionalIDAuthorizationFailed Broker: Transactional Id authorization failed + ErrTransactionalIDAuthorizationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED + // ErrSecurityDisabled Broker: Security features are disabled + ErrSecurityDisabled ErrorCode = C.RD_KAFKA_RESP_ERR_SECURITY_DISABLED + // ErrOperationNotAttempted Broker: Operation not attempted + ErrOperationNotAttempted ErrorCode = C.RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED + // ErrKafkaStorageError Broker: Disk error when trying to access log file on disk + ErrKafkaStorageError ErrorCode = C.RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR + // ErrLogDirNotFound Broker: The user-specified log directory is not found in the broker config + ErrLogDirNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND + // ErrSaslAuthenticationFailed Broker: SASL Authentication failed + ErrSaslAuthenticationFailed ErrorCode = C.RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED + // ErrUnknownProducerID Broker: Unknown Producer Id + ErrUnknownProducerID ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID + // ErrReassignmentInProgress Broker: Partition reassignment is in progress + ErrReassignmentInProgress ErrorCode = C.RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS + // ErrDelegationTokenAuthDisabled Broker: Delegation Token feature is not enabled + ErrDelegationTokenAuthDisabled ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED + // ErrDelegationTokenNotFound Broker: Delegation Token is not found on server + ErrDelegationTokenNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND + // ErrDelegationTokenOwnerMismatch Broker: Specified Principal is not valid Owner/Renewer + ErrDelegationTokenOwnerMismatch ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH + // ErrDelegationTokenRequestNotAllowed Broker: Delegation Token requests are not allowed on this connection + ErrDelegationTokenRequestNotAllowed ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED + // ErrDelegationTokenAuthorizationFailed Broker: Delegation Token authorization failed + ErrDelegationTokenAuthorizationFailed ErrorCode = 
C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED + // ErrDelegationTokenExpired Broker: Delegation Token is expired + ErrDelegationTokenExpired ErrorCode = C.RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED + // ErrInvalidPrincipalType Broker: Supplied principalType is not supported + ErrInvalidPrincipalType ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE + // ErrNonEmptyGroup Broker: The group is not empty + ErrNonEmptyGroup ErrorCode = C.RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP + // ErrGroupIDNotFound Broker: The group id does not exist + ErrGroupIDNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND + // ErrFetchSessionIDNotFound Broker: The fetch session ID was not found + ErrFetchSessionIDNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND + // ErrInvalidFetchSessionEpoch Broker: The fetch session epoch is invalid + ErrInvalidFetchSessionEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH + // ErrListenerNotFound Broker: No matching listener + ErrListenerNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND + // ErrTopicDeletionDisabled Broker: Topic deletion is disabled + ErrTopicDeletionDisabled ErrorCode = C.RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED + // ErrFencedLeaderEpoch Broker: Leader epoch is older than broker epoch + ErrFencedLeaderEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH + // ErrUnknownLeaderEpoch Broker: Leader epoch is newer than broker epoch + ErrUnknownLeaderEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH + // ErrUnsupportedCompressionType Broker: Unsupported compression type + ErrUnsupportedCompressionType ErrorCode = C.RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE + // ErrStaleBrokerEpoch Broker: Broker epoch has changed + ErrStaleBrokerEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH + // ErrOffsetNotAvailable Broker: Leader high watermark is not caught up + ErrOffsetNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE + // ErrMemberIDRequired Broker: Group member needs a valid member ID + ErrMemberIDRequired ErrorCode = C.RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED + // ErrPreferredLeaderNotAvailable Broker: Preferred leader was not available + ErrPreferredLeaderNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE + // ErrGroupMaxSizeReached Broker: Consumer group has reached maximum size + ErrGroupMaxSizeReached ErrorCode = C.RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED + // ErrFencedInstanceID Broker: Static consumer fenced by other consumer with same group.instance.id + ErrFencedInstanceID ErrorCode = C.RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID + // ErrEligibleLeadersNotAvailable Broker: Eligible partition leaders are not available + ErrEligibleLeadersNotAvailable ErrorCode = C.RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE + // ErrElectionNotNeeded Broker: Leader election not needed for topic partition + ErrElectionNotNeeded ErrorCode = C.RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED + // ErrNoReassignmentInProgress Broker: No partition reassignment is in progress + ErrNoReassignmentInProgress ErrorCode = C.RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS + // ErrGroupSubscribedToTopic Broker: Deleting offsets of a topic while the consumer group is subscribed to it + ErrGroupSubscribedToTopic ErrorCode = C.RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC + // ErrInvalidRecord Broker: Broker failed to validate record + ErrInvalidRecord ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_RECORD + // ErrUnstableOffsetCommit Broker: There are unstable offsets that need to be cleared + 
ErrUnstableOffsetCommit ErrorCode = C.RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT + // ErrThrottlingQuotaExceeded Broker: Throttling quota has been exceeded + ErrThrottlingQuotaExceeded ErrorCode = C.RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED + // ErrProducerFenced Broker: There is a newer producer with the same transactionalId which fences the current one + ErrProducerFenced ErrorCode = C.RD_KAFKA_RESP_ERR_PRODUCER_FENCED + // ErrResourceNotFound Broker: Request illegally referred to resource that does not exist + ErrResourceNotFound ErrorCode = C.RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND + // ErrDuplicateResource Broker: Request illegally referred to the same resource twice + ErrDuplicateResource ErrorCode = C.RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE + // ErrUnacceptableCredential Broker: Requested credential would not meet criteria for acceptability + ErrUnacceptableCredential ErrorCode = C.RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL + // ErrInconsistentVoterSet Broker: Indicates that the either the sender or recipient of a voter-only request is not one of the expected voters + ErrInconsistentVoterSet ErrorCode = C.RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET + // ErrInvalidUpdateVersion Broker: Invalid update version + ErrInvalidUpdateVersion ErrorCode = C.RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION + // ErrFeatureUpdateFailed Broker: Unable to update finalized features due to server error + ErrFeatureUpdateFailed ErrorCode = C.RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED + // ErrPrincipalDeserializationFailure Broker: Request principal deserialization failed during forwarding + ErrPrincipalDeserializationFailure ErrorCode = C.RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE + // ErrUnknownTopicID Broker: Unknown topic id + ErrUnknownTopicID ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID + // ErrFencedMemberEpoch Broker: The member epoch is fenced by the group coordinator + ErrFencedMemberEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH + // ErrUnreleasedInstanceID Broker: The instance ID is still used by another member in the consumer group + ErrUnreleasedInstanceID ErrorCode = C.RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID + // ErrUnsupportedAssignor Broker: The assignor or its version range is not supported by the consumer group + ErrUnsupportedAssignor ErrorCode = C.RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR + // ErrStaleMemberEpoch Broker: The member epoch is stale + ErrStaleMemberEpoch ErrorCode = C.RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH + // ErrUnknownSubscriptionID Broker: Client sent a push telemetry request with an invalid or outdated subscription ID + ErrUnknownSubscriptionID ErrorCode = C.RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID + // ErrTelemetryTooLarge Broker: Client sent a push telemetry request larger than the maximum size the broker will accept + ErrTelemetryTooLarge ErrorCode = C.RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE +) diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/glue_rdkafka.h b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/glue_rdkafka.h similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/glue_rdkafka.h rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/glue_rdkafka.h diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/handle.go similarity index 98% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/handle.go 
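The new error constants above (notably `ErrLogTruncation`, `ErrInvalidDifferentRecord`, and the telemetry codes) surface in the Go binding as `kafka.ErrorCode` values carried by `kafka.Error`. A minimal sketch of branching on a couple of them in produce-side error handling; the drop/retry policy is illustrative, not part of this change:

```go
package main

import (
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

// handleProduceError branches on a few of the v2 error codes listed above.
// The handling policy here is illustrative only.
func handleProduceError(err kafka.Error) {
	switch err.Code() {
	case kafka.ErrMsgSizeTooLarge:
		// The broker rejected the message for size; retrying as-is won't help.
		log.Printf("dropping oversized message: %v", err)
	case kafka.ErrInvalidDifferentRecord:
		// New in this bump: a sibling record in the same batch was invalid,
		// so this message failed with it and may be retried on its own.
		log.Printf("retrying message failed alongside an invalid batch sibling: %v", err)
	default:
		if err.IsRetriable() {
			log.Printf("retriable produce error: %v", err)
			return
		}
		log.Printf("non-retriable produce error: %v", err)
	}
}

func main() {
	// Synthesize an error just to exercise the handler.
	handleProduceError(kafka.NewError(kafka.ErrMsgSizeTooLarge, "example", false))
}
```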
index dc1c1716..643d80bc 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/handle.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/handle.go @@ -77,6 +77,12 @@ type Handle interface { // gethandle() returns the internal handle struct pointer gethandle() *handle + + // verifyClient() returns an error if the client is not valid + verifyClient() error + + // IsClosed() returns true if the client has been closed + IsClosed() bool } // Common instance handle for both Producer and Consumer diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/header.go similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/header.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/header.go diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/kafka.go similarity index 77% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/kafka.go index 254edbdb..d349f0e8 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/kafka.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/kafka.go @@ -17,8 +17,7 @@ // Package kafka provides high-level Apache Kafka producer and consumers // using bindings on-top of the librdkafka C library. // -// -// High-level Consumer +// # High-level Consumer // // * Decide if you want to read messages and events by calling `.Poll()` or // the deprecated option of using the `.Events()` channel. (If you want to use @@ -61,9 +60,7 @@ // * When you are done consuming call `.Close()` to commit final offsets // and leave the consumer group. // -// -// -// Producer +// # Producer // // * Create a Producer with `kafka.NewProducer()` providing at least // the `bootstrap.servers` configuration properties. @@ -92,8 +89,7 @@ // // * Finally call `.Close()` to decommission the producer. // -// -// Transactional producer API +// # Transactional producer API // // The transactional producer operates on top of the idempotent producer, // and provides full exactly-once semantics (EOS) for Apache Kafka when used @@ -185,27 +181,27 @@ // neither the retriable or abortable flags set, as fatal. // // Error handling example: -// retry: // -// err := producer.CommitTransaction(...) -// if err == nil { -// return nil -// } else if err.(kafka.Error).TxnRequiresAbort() { -// do_abort_transaction_and_reset_inputs() -// } else if err.(kafka.Error).IsRetriable() { -// goto retry -// } else { // treat all other errors as fatal errors -// panic(err) -// } +// retry: // +// err := producer.CommitTransaction(...) +// if err == nil { +// return nil +// } else if err.(kafka.Error).TxnRequiresAbort() { +// do_abort_transaction_and_reset_inputs() +// } else if err.(kafka.Error).IsRetriable() { +// goto retry +// } else { // treat all other errors as fatal errors +// panic(err) +// } // -// Events +// # Events // // Apart from emitting messages and delivery reports the client also communicates // with the application through a number of different event types. // An application may choose to handle or ignore these events. // -// Consumer events +// # Consumer events // // * `*kafka.Message` - a fetched message. // @@ -221,14 +217,12 @@ // // * `OffsetsCommitted` - Offset commit results (when `enable.auto.commit` is enabled).
// -// -// Producer events +// # Producer events // // * `*kafka.Message` - delivery report for produced message. // Check `.TopicPartition.Error` for delivery result. // -// -// Generic events for both Consumer and Producer +// # Generic events for both Consumer and Producer // // * `KafkaError` - client (error codes are prefixed with _) or broker error. // These errors are normally just informational since the @@ -243,7 +237,6 @@ // the required regular expression); invoking SetOAuthBearerTokenFailure() will // schedule a new event for 10 seconds later so another retrieval can be attempted. // -// // Hint: If your application registers a signal notification // (signal.Notify) makes sure the signals channel is buffered to avoid // possible complications with blocking Poll() calls. @@ -253,9 +246,10 @@ package kafka import ( "fmt" - // Make sure librdkafka_vendor/ sub-directory is included in vendor pulls. - _ "github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor" "unsafe" + + // Make sure librdkafka_vendor/ sub-directory is included in vendor pulls. + _ "github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor" ) /* @@ -266,6 +260,13 @@ import ( static rd_kafka_topic_partition_t *_c_rdkafka_topic_partition_list_entry(rd_kafka_topic_partition_list_t *rktparlist, int idx) { return idx < rktparlist->cnt ? &rktparlist->elems[idx] : NULL; } + +static const rd_kafka_group_result_t * +group_result_by_idx (const rd_kafka_group_result_t **groups, size_t cnt, size_t idx) { + if (idx >= cnt) + return NULL; + return groups[idx]; +} */ import "C" @@ -275,11 +276,12 @@ const PartitionAny = int32(C.RD_KAFKA_PARTITION_UA) // TopicPartition is a generic placeholder for a Topic+Partition and optionally Offset. type TopicPartition struct { - Topic *string - Partition int32 - Offset Offset - Metadata *string - Error error + Topic *string + Partition int32 + Offset Offset + Metadata *string + Error error + LeaderEpoch *int32 // LeaderEpoch or nil if not available } func (p TopicPartition) String() string { @@ -316,6 +318,65 @@ func (tps TopicPartitions) Swap(i, j int) { tps[i], tps[j] = tps[j], tps[i] } +// Node represents a Kafka broker. +type Node struct { + // Node id. + ID int + // Node host. + Host string + // Node port. + Port int + // Node rack (may be nil) + Rack *string +} + +func (n Node) String() string { + return fmt.Sprintf("[%s:%d]/%d", n.Host, n.Port, n.ID) +} + +// UUID Kafka UUID representation +type UUID struct { + // Most Significant Bits. + mostSignificantBits int64 + // Least Significant Bits. + leastSignificantBits int64 + // Base64 representation + base64str string +} + +// Base64 string representation of the UUID +func (uuid UUID) String() string { + return uuid.base64str +} + +// GetMostSignificantBits returns Most Significant 64 bits of the 128 bits UUID +func (uuid UUID) GetMostSignificantBits() int64 { + return uuid.mostSignificantBits +} + +// GetLeastSignificantBits returns Least Significant 64 bits of the 128 bits UUID +func (uuid UUID) GetLeastSignificantBits() int64 { + return uuid.leastSignificantBits +} + +// ConsumerGroupTopicPartitions represents a consumer group's TopicPartitions. 
+type ConsumerGroupTopicPartitions struct { + // Group name + Group string + // Partitions list + Partitions []TopicPartition +} + +func (gtp ConsumerGroupTopicPartitions) String() string { + res := gtp.Group + res += "[ " + for _, tp := range gtp.Partitions { + res += tp.String() + " " + } + res += "]" + return res +} + // new_cparts_from_TopicPartitions creates a new C rd_kafka_topic_partition_list_t // from a TopicPartition array. func newCPartsFromTopicPartitions(partitions []TopicPartition) (cparts *C.rd_kafka_topic_partition_list_t) { @@ -331,6 +392,11 @@ func newCPartsFromTopicPartitions(partitions []TopicPartition) (cparts *C.rd_kaf rktpar.metadata = unsafe.Pointer(cmetadata) rktpar.metadata_size = C.size_t(len(*part.Metadata)) } + + if part.LeaderEpoch != nil { + cLeaderEpoch := C.int32_t(*part.LeaderEpoch) + C.rd_kafka_topic_partition_set_leader_epoch(rktpar, cLeaderEpoch) + } } return cparts @@ -351,6 +417,11 @@ func setupTopicPartitionFromCrktpar(partition *TopicPartition, crktpar *C.rd_kaf if crktpar.err != C.RD_KAFKA_RESP_ERR_NO_ERROR { partition.Error = newError(crktpar.err) } + + cLeaderEpoch := int32(C.rd_kafka_topic_partition_get_leader_epoch(crktpar)) + if cLeaderEpoch >= 0 { + partition.LeaderEpoch = &cLeaderEpoch + } } func newTopicPartitionsFromCparts(cparts *C.rd_kafka_topic_partition_list_t) (partitions []TopicPartition) { @@ -366,6 +437,24 @@ func newTopicPartitionsFromCparts(cparts *C.rd_kafka_topic_partition_list_t) (pa return partitions } +// cToConsumerGroupTopicPartitions converts a C rd_kafka_group_result_t array to a +// ConsumerGroupTopicPartitions slice. +func (a *AdminClient) cToConsumerGroupTopicPartitions( + cGroupResults **C.rd_kafka_group_result_t, + cGroupCount C.size_t) (result []ConsumerGroupTopicPartitions) { + result = make([]ConsumerGroupTopicPartitions, uint(cGroupCount)) + + for i := uint(0); i < uint(cGroupCount); i++ { + cGroupResult := C.group_result_by_idx(cGroupResults, cGroupCount, C.size_t(i)) + cGroupPartitions := C.rd_kafka_group_result_partitions(cGroupResult) + result[i] = ConsumerGroupTopicPartitions{ + Group: C.GoString(C.rd_kafka_group_result_name(cGroupResult)), + Partitions: newTopicPartitionsFromCparts(cGroupPartitions), + } + } + return +} + // LibraryVersion returns the underlying librdkafka library version as a // (version_int, version_str) tuple. func LibraryVersion() (int, string) { @@ -373,3 +462,22 @@ func LibraryVersion() (int, string) { verstr := C.GoString(C.rd_kafka_version_str()) return ver, verstr } + +// setSaslCredentials sets the SASL credentials used for the specified Kafka client. +// The new credentials will overwrite the old ones (which were set when creating the +// client or by a previous call to setSaslCredentials). The new credentials will be +// used the next time the client needs to establish a connection to the broker. This +// function will *not* break existing broker connections that were established with the +// old credentials. This method applies only to the SASL PLAIN and SCRAM mechanisms. 
+func setSaslCredentials(rk *C.rd_kafka_t, username, password string) error { + cUsername := C.CString(username) + defer C.free(unsafe.Pointer(cUsername)) + cPassword := C.CString(password) + defer C.free(unsafe.Pointer(cPassword)) + + if err := C.rd_kafka_sasl_set_credentials(rk, cUsername, cPassword); err != nil { + return newErrorFromCErrorDestroy(err) + } + + return nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/.gitignore b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/.gitignore similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/.gitignore rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/.gitignore diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/LICENSES.txt b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/LICENSES.txt similarity index 58% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/LICENSES.txt rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/LICENSES.txt index 1ab8a1dd..1621ba09 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/LICENSES.txt +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/LICENSES.txt @@ -2,7 +2,8 @@ LICENSE -------------------------------------------------------------- librdkafka - Apache Kafka C driver library -Copyright (c) 2012-2020, Magnus Edenhill +Copyright (c) 2012-2022, Magnus Edenhill + 2023, Confluent Inc. All rights reserved. Redistribution and use in source and binary forms, with or without @@ -140,10 +141,10 @@ THE SOFTWARE LICENSE.lz4 -------------------------------------------------------------- -src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3 +src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git 5ff839680134437dbf4678f3d0c7b371d84f4964 LZ4 Library -Copyright (c) 2011-2016, Yann Collet +Copyright (c) 2011-2020, Yann Collet All rights reserved. Redistribution and use in source and binary forms, with or without modification, @@ -197,6 +198,238 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +LICENSE.nanopb +-------------------------------------------------------------- +For files in src/nanopb : https://github.com/nanopb/nanopb/blob/8ef41e0ebd45daaf19459a011f67e66224b247cd/LICENSE.txt + +Copyright (c) 2011 Petteri Aimonen + +This software is provided 'as-is', without any express or +implied warranty. In no event will the authors be held liable +for any damages arising from the use of this software. + +Permission is granted to anyone to use this software for any +purpose, including commercial applications, and to alter it and +redistribute it freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you + must not claim that you wrote the original software. If you use + this software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and + must not be misrepresented as being the original software. + +3. This notice may not be removed or altered from any source + distribution. 
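Stepping back to the kafka.go change above: the unexported setSaslCredentials helper backs the v2 binding's runtime credential rotation, which (per the v2 changelog) is exposed publicly as SetSaslCredentials on the Producer and Consumer types. A minimal sketch of rotating SCRAM credentials without recreating the client; the broker address, mechanism, and credential values are placeholders:

```go
package main

import (
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)

func main() {
	// Placeholder broker address and initial credentials.
	p, err := kafka.NewProducer(&kafka.ConfigMap{
		"bootstrap.servers": "kafka:9092",
		"security.protocol": "SASL_SSL",
		"sasl.mechanism":    "SCRAM-SHA-512",
		"sasl.username":     "old-user",
		"sasl.password":     "old-secret",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	// Rotate credentials in place: existing broker connections are kept,
	// and new connections authenticate with the new username/password.
	if err := p.SetSaslCredentials("new-user", "new-secret"); err != nil {
		log.Fatalf("credential rotation failed: %v", err)
	}
}
```

As the helper's comment notes, this applies only to the SASL PLAIN and SCRAM mechanisms and does not tear down connections established with the old credentials.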
+ + +LICENSE.opentelemetry +-------------------------------------------------------------- +For files in src/opentelemetry: https://github.com/open-telemetry/opentelemetry-proto/blob/81a296f9dba23e32d77f46d58c8ea4244a2157a6/LICENSE + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + LICENSE.pycrc -------------------------------------------------------------- The following license applies to the files rdcrc32.c and rdcrc32.h which diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/README.md b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/README.md similarity index 86% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/README.md rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/README.md index b13dfd21..cdbaf533 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/README.md +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/README.md @@ -3,9 +3,10 @@ confluent-kafka-go bundles prebuilt statically linked versions of librdkafka for the following platforms: - * MacOSX x64 (aka Darwin) - * Linux glibc x64 (Ubuntu, CentOS, etc) - * Linux musl x64 (Alpine) + * MacOSX x64, arm64 (aka Darwin) + * Linux glibc x64, arm64 (Ubuntu, CentOS, etc) + * Linux musl x64, arm64 (Alpine) + * Windows x64 ## Import static librdkafka bundle diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/bundle-import.sh b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/bundle-import.sh similarity index 88% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/bundle-import.sh rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/bundle-import.sh index 9ad738f3..065c6f87 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/bundle-import.sh +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/bundle-import.sh @@ -50,9 +50,9 @@ setup_build() { local gpath="../build_${btype}.go" local dpath="librdkafka_${btype}.a" - if [[ $btype == glibc_linux ]]; then + if [[ $btype =~ ^glibc_linux.*$ ]]; then build_tag="// +build !musl" - elif [[ $btype == musl_linux ]]; then + elif [[ $btype =~ ^musl_linux.*$ ]]; then build_tag="// +build musl" fi @@ -101,7 +101,13 @@ for f in rdkafka.h LICENSES.txt ; do done -for btype in glibc_linux musl_linux darwin_amd64 darwin_arm64 windows ; do +for btype in glibc_linux_amd64 \ + glibc_linux_arm64 \ + musl_linux_amd64 \ + musl_linux_arm64 \ + darwin_amd64 \ + darwin_arm64 \ + windows ; do lib=$bdir/librdkafka_${btype}.a pc=${lib/%.a/.pc} [[ -f $lib ]] || (echo "Expected file $lib missing" ; exit 1) diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/import.sh b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/import.sh similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/import.sh rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/import.sh diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka.go similarity 
index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka.go diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_musl_linux.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a similarity index 53% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_musl_linux.a rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a index eee1fb3c..40b2ca88 100644 Binary files a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_musl_linux.a and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_amd64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_glibc_linux.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a similarity index 57% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_glibc_linux.a rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a index 2df79004..23db7a56 100644 Binary files a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/librdkafka_glibc_linux.a and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_darwin_arm64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_amd64.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_amd64.a new file mode 100644 index 00000000..49721929 Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_amd64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_arm64.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_arm64.a new file mode 100644 index 00000000..f533f068 Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_glibc_linux_arm64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_amd64.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_amd64.a new file mode 100644 index 00000000..69626086 Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_amd64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_arm64.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_arm64.a new file mode 100644 index 00000000..175fc792 Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_musl_linux_arm64.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_windows.a b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_windows.a new file 
mode 100644 index 00000000..88621e4b Binary files /dev/null and b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/librdkafka_windows.a differ diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka.h b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/rdkafka.h similarity index 79% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka.h rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/rdkafka.h index b424b218..f977fa7f 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka.h +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/rdkafka.h @@ -1,7 +1,8 @@ /* * librdkafka - Apache Kafka C library * - * Copyright (c) 2012-2022 Magnus Edenhill + * Copyright (c) 2012-2022, Magnus Edenhill + * 2023, Confluent Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -92,6 +93,7 @@ typedef SSIZE_T ssize_t; #define RD_DEPRECATED __attribute__((deprecated)) #if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__) +#define RD_HAS_STATEMENT_EXPRESSIONS #define RD_FORMAT(...) __attribute__((format(__VA_ARGS__))) #else #define RD_FORMAT(...) @@ -165,7 +167,7 @@ typedef SSIZE_T ssize_t; * @remark This value should only be used during compile time, * for runtime checks of version use rd_kafka_version() */ -#define RD_KAFKA_VERSION 0x010902ff +#define RD_KAFKA_VERSION 0x020601ff /** * @brief Returns the librdkafka version as integer. @@ -260,6 +262,9 @@ typedef struct rd_kafka_error_s rd_kafka_error_t; typedef struct rd_kafka_headers_s rd_kafka_headers_t; typedef struct rd_kafka_group_result_s rd_kafka_group_result_t; typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t; +typedef struct rd_kafka_Uuid_s rd_kafka_Uuid_t; +typedef struct rd_kafka_topic_partition_result_s + rd_kafka_topic_partition_result_t; /* @endcond */ @@ -402,6 +407,11 @@ typedef enum { RD_KAFKA_RESP_ERR__NOOP = -141, /** No offset to automatically reset to */ RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140, + /** Partition log truncation detected */ + RD_KAFKA_RESP_ERR__LOG_TRUNCATION = -139, + /** A different record in the batch was invalid + * and this message failed persisting. 
*/ + RD_KAFKA_RESP_ERR__INVALID_DIFFERENT_RECORD = -138, /** End internal error codes */ RD_KAFKA_RESP_ERR__END = -100, @@ -421,7 +431,9 @@ typedef enum { RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4, /** Leader not available */ RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5, - /** Not leader for partition */ +/** Not leader for partition */ +#define RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER \ + RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6, /** Request timed out */ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7, @@ -624,7 +636,24 @@ typedef enum { RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96, /** Request principal deserialization failed during forwarding */ RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97, - + /** Unknown Topic Id */ + RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_ID = 100, + /** The member epoch is fenced by the group coordinator */ + RD_KAFKA_RESP_ERR_FENCED_MEMBER_EPOCH = 110, + /** The instance ID is still used by another member in the + * consumer group */ + RD_KAFKA_RESP_ERR_UNRELEASED_INSTANCE_ID = 111, + /** The assignor or its version range is not supported by the consumer + * group */ + RD_KAFKA_RESP_ERR_UNSUPPORTED_ASSIGNOR = 112, + /** The member epoch is stale */ + RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH = 113, + /** Client sent a push telemetry request with an invalid or outdated + * subscription ID. */ + RD_KAFKA_RESP_ERR_UNKNOWN_SUBSCRIPTION_ID = 117, + /** Client sent a push telemetry request larger than the maximum size + * the broker will accept. */ + RD_KAFKA_RESP_ERR_TELEMETRY_TOO_LARGE = 118, RD_KAFKA_RESP_ERR_END_ALL, } rd_kafka_resp_err_t; @@ -898,10 +927,11 @@ typedef struct rd_kafka_topic_partition_s { void *opaque; /**< Opaque value for application use */ rd_kafka_resp_err_t err; /**< Error code, depending on use. */ void *_private; /**< INTERNAL USE ONLY, - * INITIALIZE TO ZERO, DO NOT TOUCH */ + * INITIALIZE TO ZERO, DO NOT TOUCH, + * DO NOT COPY, DO NOT SHARE WITH OTHER + * rd_kafka_t INSTANCES. */ } rd_kafka_topic_partition_t; - /** * @brief Destroy a rd_kafka_topic_partition_t. * @remark This must not be called for elements in a topic partition list. @@ -910,6 +940,31 @@ RD_EXPORT void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar); +/** + * @brief Sets the offset leader epoch (use -1 to clear). + * + * @param rktpar Partition object. + * @param leader_epoch Offset leader epoch, use -1 to reset. + * + * @remark See KIP-320 for more information. + */ +RD_EXPORT +void rd_kafka_topic_partition_set_leader_epoch( + rd_kafka_topic_partition_t *rktpar, + int32_t leader_epoch); + +/** + * @returns the offset leader epoch, if relevant and known, + * else -1. + * + * @param rktpar Partition object. + * + * @remark See KIP-320 for more information. + */ +RD_EXPORT +int32_t rd_kafka_topic_partition_get_leader_epoch( + const rd_kafka_topic_partition_t *rktpar); + /** * @brief A growable list of Topic+Partitions. * @@ -920,7 +975,6 @@ typedef struct rd_kafka_topic_partition_list_s { rd_kafka_topic_partition_t *elems; /**< Element array[] */ } rd_kafka_topic_partition_list_t; - /** * @brief Create a new list/vector Topic+Partition container. * @@ -938,7 +992,6 @@ typedef struct rd_kafka_topic_partition_list_s { RD_EXPORT rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size); - /** * @brief Free all resources used by the list and the list itself. */ @@ -1429,7 +1482,8 @@ typedef struct rd_kafka_message_s { * for retried messages when * idempotence is enabled. 
*/ void *_private; /**< Consumer: - * - rdkafka private pointer: DO NOT MODIFY + * - rdkafka private pointer: + * DO NOT MODIFY, DO NOT COPY. * Producer: * - dr_msg_cb: * msg_opaque from produce() call or @@ -1454,6 +1508,16 @@ void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage); RD_EXPORT const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage); +/** + * @brief Returns the error string for an errored produced rd_kafka_message_t or + * NULL if there was no error. + * + * @remark This function MUST be used with the producer. + */ +RD_EXPORT +const char * +rd_kafka_message_produce_errstr(const rd_kafka_message_t *rkmessage); + /** * @brief Returns the message timestamp for a consumed message. @@ -1585,6 +1649,87 @@ typedef enum { RD_EXPORT rd_kafka_msg_status_t rd_kafka_message_status(const rd_kafka_message_t *rkmessage); + +/** + * @returns the message's partition leader epoch at the time the message was + * fetched and if known, else -1. + * + * @remark This API must only be used on consumed messages without error. + * @remark Requires broker version >= 2.1.0 (KIP-320). + */ +RD_EXPORT int32_t +rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage); + + +/**@}*/ + + +/** + * @name UUID + * @{ + * + */ + +/** + * @brief Computes base64 encoding for the given uuid string. + * @param uuid UUID for which base64 encoding is required. + * + * @return base64 encoded string for the given UUID or NULL in case of some + * issue with the conversion or the conversion is not supported. + */ +RD_EXPORT const char *rd_kafka_Uuid_base64str(const rd_kafka_Uuid_t *uuid); + +/** + * @brief Gets least significant 64 bits for the given UUID. + * + * @param uuid UUID + * + * @return least significant 64 bits for the given UUID. + */ +RD_EXPORT int64_t +rd_kafka_Uuid_least_significant_bits(const rd_kafka_Uuid_t *uuid); + + +/** + * @brief Gets most significant 64 bits for the given UUID. + * + * @param uuid UUID + * + * @return most significant 64 bits for the given UUID. + */ +RD_EXPORT int64_t +rd_kafka_Uuid_most_significant_bits(const rd_kafka_Uuid_t *uuid); + + +/** + * @brief Creates a new UUID. + * + * @param most_significant_bits most significant 64 bits of the 128 bits UUID. + * @param least_significant_bits least significant 64 bits of the 128 bits UUID. + * + * @return A newly allocated UUID. + * @remark Must be freed after use using rd_kafka_Uuid_destroy() + */ +RD_EXPORT rd_kafka_Uuid_t *rd_kafka_Uuid_new(int64_t most_significant_bits, + int64_t least_significant_bits); + +/** + * @brief Copies the given UUID. + * + * @param uuid UUID to be copied. + * + * @return A newly allocated copy of the provided UUID. + * @remark Must be freed after use using rd_kafka_Uuid_destroy() + */ +RD_EXPORT rd_kafka_Uuid_t *rd_kafka_Uuid_copy(const rd_kafka_Uuid_t *uuid); + +/** + * @brief Destroy the provided uuid. + * + * @param uuid UUID + */ +RD_EXPORT void rd_kafka_Uuid_destroy(rd_kafka_Uuid_t *uuid); + /**@}*/ @@ -1695,7 +1840,7 @@ const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk); * Topic-level configuration properties may be set using this interface * in which case they are applied on the \c default_topic_conf. * If no \c default_topic_conf has been set one will be created. - * Any sub-sequent rd_kafka_conf_set_default_topic_conf() calls will + * Any subsequent rd_kafka_conf_set_default_topic_conf() calls will * replace the current default topic configuration. * * @returns \c rd_kafka_conf_res_t to indicate success or failure.
@@ -2056,7 +2201,7 @@ void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf, * rd_kafka_conf_set_opaque(). * * For more information on the format of \p json, see - * https://github.com/edenhill/librdkafka/wiki/Statistics + * https://github.com/confluentinc/librdkafka/wiki/Statistics * * If the application wishes to hold on to the \p json pointer and free * it at a later time it must return 1 from the \p stats_cb. @@ -2244,6 +2389,35 @@ void rd_kafka_conf_set_open_cb( int (*open_cb)(const char *pathname, int flags, mode_t mode, void *opaque)); #endif +/** Forward declaration to avoid netdb.h or winsock includes */ +struct addrinfo; + +/** + * @brief Set address resolution callback. + * + * The callback is responsible for resolving the hostname \p node and the + * service \p service into a list of socket addresses as \c getaddrinfo(3) + * would. The \p hints and \p res parameters function as they do for + * \c getaddrinfo(3). The callback's \p opaque argument is the opaque set with + * rd_kafka_conf_set_opaque(). + * + * If the callback is invoked with a NULL \p node, \p service, and \p hints, the + * callback should instead free the addrinfo struct specified in \p res. In this + * case the callback must succeed; the return value will not be checked by the + * caller. + * + * The callback's return value is interpreted as the return value of \p + * \c getaddrinfo(3). + * + * @remark The callback will be called from an internal librdkafka thread. + */ +RD_EXPORT void +rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf, + int (*resolve_cb)(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res, + void *opaque)); /** * @brief Sets the verification callback of the broker certificate @@ -2363,6 +2537,14 @@ typedef enum rd_kafka_cert_enc_t { * * @remark CA certificate in PEM format may also be set with the * `ssl.ca.pem` configuration property. + * + * @remark When librdkafka is linked to OpenSSL 3.0 and the certificate is + * encoded using an obsolete cipher, it might be necessary to set up + * an OpenSSL configuration file to load the "legacy" provider and + * set the OPENSSL_CONF environment variable. + * See + * https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more + * information. */ RD_EXPORT rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf, @@ -2527,9 +2709,8 @@ void rd_kafka_conf_properties_show(FILE *fp); /** * @name Topic configuration - * @{ - * * @brief Topic configuration property interface + * @{ * */ @@ -2845,7 +3026,7 @@ int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt, * \p conf is an optional struct created with `rd_kafka_conf_new()` that will * be used instead of the default configuration. * The \p conf object is freed by this function on success and must not be used - * or destroyed by the application sub-sequently. + * or destroyed by the application subsequently. * See `rd_kafka_conf_set()` et.al for more information. * * \p errstr must be a pointer to memory of at least size \p errstr_size where @@ -2991,7 +3172,7 @@ int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms); * `rd_kafka_topic_conf_new()` that will be used instead of the default * topic configuration. * The \p conf object is freed by this function and must not be used or - * destroyed by the application sub-sequently. + * destroyed by the application subsequently. * See `rd_kafka_topic_conf_set()` et.al for more information. 
* * Topic handles are refcounted internally and calling rd_kafka_topic_new() @@ -3051,22 +3232,22 @@ void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt); /** * @brief Polls the provided kafka handle for events. * - * Events will cause application provided callbacks to be called. + * Events will cause application-provided callbacks to be called. * * The \p timeout_ms argument specifies the maximum amount of time * (in milliseconds) that the call will block waiting for events. * For non-blocking calls, provide 0 as \p timeout_ms. - * To wait indefinately for an event, provide -1. + * To wait indefinitely for an event, provide -1. * * @remark An application should make sure to call poll() at regular * intervals to serve any queued callbacks waiting to be called. * @remark If your producer doesn't have any callback set (in particular * via rd_kafka_conf_set_dr_msg_cb or rd_kafka_conf_set_error_cb) - * you might chose not to call poll(), though this is not + * you might choose not to call poll(), though this is not * recommended. * * Events: - * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer] + * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer] * - error callbacks (rd_kafka_conf_set_error_cb()) [all] * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all] * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all] @@ -3323,6 +3504,25 @@ RD_EXPORT rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk); +/** + * @brief Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by + * this Kafka client. + * + * This function sets or resets the SASL username and password credentials + * used by this Kafka client. The new credentials will be used the next time + * this client needs to authenticate to a broker. This function + * will not disconnect existing connections that might have been made using + * the old credentials. + * + * @remark This function only applies to the SASL PLAIN and SCRAM mechanisms. + * + * @returns NULL on success or an error object on error. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk, + const char *username, + const char *password); + /** * @returns a reference to the librdkafka consumer queue. * This is the queue served by rd_kafka_consumer_poll(). @@ -3331,6 +3531,12 @@ rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk); * * @remark rd_kafka_queue_destroy() MUST be called on this queue * prior to calling rd_kafka_consumer_close(). + * @remark Polling the returned queue counts as a consumer poll, and will reset + * the timer for max.poll.interval.ms. If this queue is forwarded to a + * "destq", polling destq also counts as a consumer poll (this works + * for any number of forwards). However, even if this queue is + * unforwarded or forwarded elsewhere, polling destq will continue + * to count as a consumer poll. */ RD_EXPORT rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk); @@ -3764,6 +3970,8 @@ int rd_kafka_consume_callback(rd_kafka_topic_t *rkt, void *commit_opaque); +/**@}*/ + /** * @name Simple Consumer API (legacy): Queue consumers * @{ @@ -3832,6 +4040,11 @@ int rd_kafka_consume_callback_queue( * The \c offset + 1 will be committed (written) to broker (or file) according * to \c `auto.commit.interval.ms` or manual offset-less commit() * + * @deprecated This API lacks support for partition leader epochs, which makes + * it at risk for unclean leader election log truncation issues. 
+ * Use rd_kafka_offsets_store() and rd_kafka_offset_store_message() + * instead. + * + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. @@ -3874,6 +4087,9 @@ rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset); * @remark \c `enable.auto.offset.store` must be set to "false" when using * this API. * + * @remark The leader epoch, if set, will be used to fence outdated partition + * leaders. See rd_kafka_topic_partition_set_leader_epoch(). + * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on (partial) success, or * RD_KAFKA_RESP_ERR__INVALID_ARG if \c enable.auto.offset.store * is true, or @@ -3883,14 +4099,39 @@ rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset); RD_EXPORT rd_kafka_resp_err_t rd_kafka_offsets_store(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *offsets); + + +/** + * @brief Store offset +1 for the consumed message. + * + * The message offset + 1 will be committed to the broker according + * to \c `auto.commit.interval.ms` or a manual offset-less commit(). + * + * @warning This method may only be called for partitions that are currently + * assigned. + * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE. + * Since v1.9.0. + * + * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as + * this may later interfere with resuming a paused partition; instead, + * store offsets prior to calling seek. + * + * @remark \c `enable.auto.offset.store` must be set to "false" when using + * this API. + * + * @returns NULL on success or an error object on failure. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage); + /**@}*/ /** * @name KafkaConsumer (C) - * @{ * @brief High-level KafkaConsumer C API + * @{ * * * @@ -4193,6 +4434,21 @@ RD_EXPORT int rd_kafka_assignment_lost(rd_kafka_t *rk); * or successfully scheduled if asynchronous, or failed. * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised * a fatal error. + * + * RD_KAFKA_RESP_ERR_STALE_MEMBER_EPOCH is returned, when + * using `group.protocol=consumer`, if the commit failed because the + * member has switched to a new member epoch. + * This error code is retriable. + * Partition level error is also set in the \p offsets. + * + * RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID is returned, when + * using `group.protocol=consumer`, if the member has been + * removed from the consumer group. + * This error code is permanent; uncommitted messages will be + * reprocessed by this or a different member and committed there. + * Partition level error is also set in the \p offsets. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_commit(rd_kafka_t *rk, @@ -4337,6 +4593,20 @@ rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id, const char *group_instance_id); +/** + * @brief Get member id of a group metadata. + * + * @param group_metadata The group metadata. + * + * @returns The member id contained in the passed \p group_metadata. + * + * @remark The returned pointer has the same lifetime as \p group_metadata. + */ +RD_EXPORT +const char *rd_kafka_consumer_group_metadata_member_id( + const rd_kafka_consumer_group_metadata_t *group_metadata); + + /** + * @brief Frees the consumer group metadata object as returned by + * rd_kafka_consumer_group_metadata().
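To make the migration recommended above concrete, here is a minimal consume-loop sketch using rd_kafka_offset_store_message(); it assumes `enable.auto.offset.store=false` was set on the configuration, and `process()` is a hypothetical application handler (includes and error paths abbreviated):

```c
rd_kafka_message_t *rkm = rd_kafka_consumer_poll(rk, 100);
if (rkm) {
        if (!rkm->err) {
                process(rkm); /* hypothetical application logic */

                /* Store offset+1 (and leader epoch) for the next
                 * auto-commit; replaces rd_kafka_offset_store(). */
                rd_kafka_error_t *error = rd_kafka_offset_store_message(rkm);
                if (error) {
                        fprintf(stderr, "offset store failed: %s\n",
                                rd_kafka_error_string(error));
                        rd_kafka_error_destroy(error);
                }
        }
        rd_kafka_message_destroy(rkm);
}
```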
@@ -4419,13 +4689,13 @@ RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_read( #define RD_KAFKA_MSG_F_BLOCK \ 0x4 /**< Block produce*() on message queue full. \ * WARNING: If a delivery report callback \ - * is used the application MUST \ + * is used, the application MUST \ * call rd_kafka_poll() (or equiv.) \ * to make sure delivered messages \ * are drained from the internal \ * delivery report queue. \ * Failure to do so will result \ - * in indefinately blocking on \ + * in indefinitely blocking on \ * the produce() call when the \ * message queue is full. */ #define RD_KAFKA_MSG_F_PARTITION \ @@ -4440,10 +4710,10 @@ RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_read( * \p rkt is the target topic which must have been previously created with * `rd_kafka_topic_new()`. * - * `rd_kafka_produce()` is an asynch non-blocking API. + * `rd_kafka_produce()` is an asynchronous non-blocking API. * See `rd_kafka_conf_set_dr_msg_cb` on how to setup a callback to be called * once the delivery status (success or failure) is known. The delivery report - * is trigged by the application calling `rd_kafka_poll()` (at regular + * is triggered by the application calling `rd_kafka_poll()` (at regular * intervals) or `rd_kafka_flush()` (at termination). * * Since producing is asynchronous, you should call `rd_kafka_flush()` before @@ -4660,7 +4930,7 @@ rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms); * RD_KAFKA_RESP_ERR__PURGE_INFLIGHT. * * @warning Purging messages that are in-flight to or from the broker - * will ignore any sub-sequent acknowledgement for these messages + * will ignore any subsequent acknowledgement for these messages * received from the broker, effectively making it impossible * for the application to know if the messages were successfully * produced or not. This may result in duplicate messages if the @@ -4762,7 +5032,6 @@ typedef struct rd_kafka_metadata { char *orig_broker_name; /**< Name of originating broker */ } rd_kafka_metadata_t; - /** * @brief Request Metadata from broker. * @@ -4797,6 +5066,53 @@ rd_kafka_metadata(rd_kafka_t *rk, RD_EXPORT void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata); +/** + * @brief Node (broker) information. + */ +typedef struct rd_kafka_Node_s rd_kafka_Node_t; + +/** + * @brief Get the id of \p node. + * + * @param node The Node instance. + * + * @return The node id. + */ +RD_EXPORT +int rd_kafka_Node_id(const rd_kafka_Node_t *node); + +/** + * @brief Get the host of \p node. + * + * @param node The Node instance. + * + * @return The node host. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p node object. + */ +RD_EXPORT +const char *rd_kafka_Node_host(const rd_kafka_Node_t *node); + +/** + * @brief Get the port of \p node. + * + * @param node The Node instance. + * + * @return The node port. + */ +RD_EXPORT +uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node); + +/** + * @brief Get the rack of \p node. + * + * @param node The Node instance + * + * @return The node rack id. May be NULL. + */ +RD_EXPORT +const char *rd_kafka_Node_rack(const rd_kafka_Node_t *node); /**@}*/ @@ -4829,6 +5145,33 @@ struct rd_kafka_group_member_info { int member_assignment_size; /**< Member assignment size in bytes */ }; +/** + * @enum rd_kafka_consumer_group_state_t + * + * @brief Consumer group state. 
+ */ +typedef enum { + RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0, + RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1, + RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2, + RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3, + RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4, + RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5, + RD_KAFKA_CONSUMER_GROUP_STATE__CNT +} rd_kafka_consumer_group_state_t; + +/** + * @enum rd_kafka_consumer_group_type_t + * + * @brief Consumer group type. + */ +typedef enum { + RD_KAFKA_CONSUMER_GROUP_TYPE_UNKNOWN = 0, + RD_KAFKA_CONSUMER_GROUP_TYPE_CONSUMER = 1, + RD_KAFKA_CONSUMER_GROUP_TYPE_CLASSIC = 2, + RD_KAFKA_CONSUMER_GROUP_TYPE__CNT +} rd_kafka_consumer_group_type_t; + /** * @brief Group information */ @@ -4857,7 +5200,7 @@ struct rd_kafka_group_list { /** * @brief List and describe client groups in cluster. * - * \p group is an optional group name to describe, otherwise (\p NULL) all + * \p group is an optional group name to describe, otherwise (\c NULL) all * groups are returned. * * \p timeout_ms is the (approximate) maximum time to wait for response @@ -4880,6 +5223,9 @@ struct rd_kafka_group_list { * group list. * * @sa Use rd_kafka_group_list_destroy() to release list memory. + * + * @deprecated Use rd_kafka_ListConsumerGroups() and + * rd_kafka_DescribeConsumerGroups() instead. */ RD_EXPORT rd_kafka_resp_err_t @@ -4888,6 +5234,52 @@ rd_kafka_list_groups(rd_kafka_t *rk, const struct rd_kafka_group_list **grplistp, int timeout_ms); +/** + * @brief Returns a name for a state code. + * + * @param state The state value. + * + * @return The group state name corresponding to the provided group state value. + */ +RD_EXPORT +const char * +rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state); + +/** + * @brief Returns a code for a state name. + * + * @param name The state name. + * + * @return The group state value corresponding to the provided group state name. + */ +RD_EXPORT +rd_kafka_consumer_group_state_t +rd_kafka_consumer_group_state_code(const char *name); + +/** + * @brief Returns a name for a group type code. + * + * @param type The group type value. + * + * @return The group type name corresponding to the provided group type value. + */ +RD_EXPORT +const char * +rd_kafka_consumer_group_type_name(rd_kafka_consumer_group_type_t type); + +/** + * @brief Returns a code for a group type name. + * + * @param name The group type name. + * + * @remark The comparison is case-insensitive. + * + * @return The group type value corresponding to the provided group type name. 
+ */ +RD_EXPORT +rd_kafka_consumer_group_type_t +rd_kafka_consumer_group_type_code(const char *name); + /** * @brief Release list memory */ @@ -5141,6 +5533,28 @@ typedef int rd_kafka_event_type_t; #define RD_KAFKA_EVENT_CREATEACLS_RESULT 0x400 /**< CreateAcls_result_t */ #define RD_KAFKA_EVENT_DESCRIBEACLS_RESULT 0x800 /**< DescribeAcls_result_t */ #define RD_KAFKA_EVENT_DELETEACLS_RESULT 0x1000 /**< DeleteAcls_result_t */ +/** ListConsumerGroupsResult_t */ +#define RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT 0x2000 +/** DescribeConsumerGroups_result_t */ +#define RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT 0x4000 +/** ListConsumerGroupOffsets_result_t */ +#define RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT 0x8000 +/** AlterConsumerGroupOffsets_result_t */ +#define RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT 0x10000 +/** IncrementalAlterConfigs_result_t */ +#define RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT 0x20000 +/** DescribeUserScramCredentials_result_t */ +#define RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT 0x40000 +/** AlterUserScramCredentials_result_t */ +#define RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT 0x80000 +/** DescribeTopics_result_t */ +#define RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT 0x100000 +/** DescribeCluster_result_t */ +#define RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT 0x200000 +/** ListOffsets_result_t */ +#define RD_KAFKA_EVENT_LISTOFFSETS_RESULT 0x400000 +/** ElectLeaders_result_t */ +#define RD_KAFKA_EVENT_ELECTLEADERS_RESULT 0x800000 /** * @returns the event type for the given event. @@ -5287,10 +5701,19 @@ int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev); * - RD_KAFKA_EVENT_DESCRIBEACLS_RESULT * - RD_KAFKA_EVENT_DELETEACLS_RESULT * - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT + * - RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT * - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT * - RD_KAFKA_EVENT_DELETEGROUPS_RESULT * - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT * - RD_KAFKA_EVENT_DELETERECORDS_RESULT + * - RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT + * - RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + * - RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT + * - RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT + * - RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT + * - RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT + * - RD_KAFKA_EVENT_LISTOFFSETS_RESULT + * - RD_KAFKA_EVENT_ELECTLEADERS_RESULT */ RD_EXPORT void *rd_kafka_event_opaque(rd_kafka_event_t *rkev); @@ -5386,14 +5809,36 @@ typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t; typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t; /*! AlterConfigs result type */ typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t; +/*! IncrementalAlterConfigs result type */ +typedef rd_kafka_event_t rd_kafka_IncrementalAlterConfigs_result_t; /*! DescribeConfigs result type */ typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t; /*! DeleteRecords result type */ typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t; +/*! ListConsumerGroups result type */ +typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t; +/*! DescribeConsumerGroups result type */ +typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t; /*! DeleteGroups result type */ typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t; /*! DeleteConsumerGroupOffsets result type */ typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t; +/*! AlterConsumerGroupOffsets result type */ +typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t; +/*!
ListConsumerGroupOffsets result type */ +typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t; +/*! DescribeTopics result type */ +typedef rd_kafka_event_t rd_kafka_DescribeTopics_result_t; +/*! DescribeCluster result type */ +typedef rd_kafka_event_t rd_kafka_DescribeCluster_result_t; +/*! DescribeUserScramCredentials result type */ +typedef rd_kafka_event_t rd_kafka_DescribeUserScramCredentials_result_t; +/*! AlterUserScramCredentials result type */ +typedef rd_kafka_event_t rd_kafka_AlterUserScramCredentials_result_t; +/*! ListOffsets result type */ +typedef rd_kafka_event_t rd_kafka_ListOffsets_result_t; +/*! ElectLeaders result type */ +typedef rd_kafka_event_t rd_kafka_ElectLeaders_result_t; /** * @brief Get CreateTopics result. @@ -5443,6 +5888,18 @@ rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev); RD_EXPORT const rd_kafka_AlterConfigs_result_t * rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev); +/** + * @brief Get IncrementalAlterConfigs result. + * + * @returns the result of a IncrementalAlterConfigs request, or NULL if event is + * of different type. + * + * Event types: + * RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT + */ +RD_EXPORT const rd_kafka_IncrementalAlterConfigs_result_t * +rd_kafka_event_IncrementalAlterConfigs_result(rd_kafka_event_t *rkev); + /** * @brief Get DescribeConfigs result. * @@ -5465,6 +5922,65 @@ rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev); RD_EXPORT const rd_kafka_DeleteRecords_result_t * rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev); +/** + * @brief Get ListConsumerGroups result. + * + * @returns the result of a ListConsumerGroups request, or NULL if event is of + * different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT + */ +RD_EXPORT const rd_kafka_ListConsumerGroups_result_t * +rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeConsumerGroups result. + * + * @returns the result of a DescribeConsumerGroups request, or NULL if event is + * of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeConsumerGroups_result_t * +rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeTopics result. + * + * @returns the result of a DescribeTopics request, or NULL if event is + * of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeTopics_result_t * +rd_kafka_event_DescribeTopics_result(rd_kafka_event_t *rkev); + +/** + * @brief Get DescribeCluster result. + * + * @returns the result of a DescribeCluster request, or NULL if event is + * of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT + */ +RD_EXPORT const rd_kafka_DescribeCluster_result_t * +rd_kafka_event_DescribeCluster_result(rd_kafka_event_t *rkev); /** * @brief Get DeleteGroups result. 
* @@ -5520,44 +6036,135 @@ RD_EXPORT const rd_kafka_DeleteAcls_result_t * rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev); /** - * @brief Poll a queue for an event for max \p timeout_ms. + * @brief Get ListConsumerGroupOffsets result. * - * @returns an event, or NULL. + * @returns the result of a ListConsumerGroupOffsets request, or NULL if + * event is of different type. * - * @remark Use rd_kafka_event_destroy() to free the event. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. * - * @sa rd_kafka_conf_set_background_event_cb() + * Event types: + * RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT */ -RD_EXPORT -rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms); +RD_EXPORT const rd_kafka_ListConsumerGroupOffsets_result_t * +rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev); /** - * @brief Poll a queue for events served through callbacks for max \p - * timeout_ms. + * @brief Get AlterConsumerGroupOffsets result. * - * @returns the number of events served. - * - * @remark This API must only be used for queues with callbacks registered - * for all expected event types. E.g., not a message queue. + * @returns the result of a AlterConsumerGroupOffsets request, or NULL if + * event is of different type. * - * @remark Also see rd_kafka_conf_set_background_event_cb() for triggering - * event callbacks from a librdkafka-managed background thread. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. * - * @sa rd_kafka_conf_set_background_event_cb() + * Event types: + * RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT */ -RD_EXPORT -int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms); - - -/**@}*/ - +RD_EXPORT const rd_kafka_AlterConsumerGroupOffsets_result_t * +rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev); /** - * @name Plugin interface + * @brief Get ListOffsets result. * - * @brief A plugin interface that allows external runtime-loaded libraries - * to integrate with a client instance without modifications to - * the application code. + * @returns the result of a ListOffsets request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_LISTOFFSETS_RESULT + */ +RD_EXPORT const rd_kafka_ListOffsets_result_t * +rd_kafka_event_ListOffsets_result(rd_kafka_event_t *rkev); + + +/** + * @brief Get DescribeUserScramCredentials result. + * + * @returns the result of a DescribeUserScramCredentials request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_DESCRIBEUSERSCRAMCREDENTIALS_RESULT + */ +RD_EXPORT const rd_kafka_DescribeUserScramCredentials_result_t * +rd_kafka_event_DescribeUserScramCredentials_result(rd_kafka_event_t *rkev); + +/** + * @brief Get AlterUserScramCredentials result. + * + * @returns the result of a AlterUserScramCredentials request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. 
+ * + * Event types: + * RD_KAFKA_EVENT_ALTERUSERSCRAMCREDENTIALS_RESULT + */ +RD_EXPORT const rd_kafka_AlterUserScramCredentials_result_t * +rd_kafka_event_AlterUserScramCredentials_result(rd_kafka_event_t *rkev); + +/** + * @brief Get ElectLeaders result. + * + * @returns the result of a ElectLeaders request, or NULL if + * event is of different type. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p rkev object. + * + * Event types: + * RD_KAFKA_EVENT_ELECTLEADERS_RESULT + */ +RD_EXPORT const rd_kafka_ElectLeaders_result_t * +rd_kafka_event_ElectLeaders_result(rd_kafka_event_t *rkev); + +/** + * @brief Poll a queue for an event for max \p timeout_ms. + * + * @returns an event, or NULL. + * + * @remark Use rd_kafka_event_destroy() to free the event. + * + * @sa rd_kafka_conf_set_background_event_cb() + */ +RD_EXPORT +rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms); + +/** + * @brief Poll a queue for events served through callbacks for max \p + * timeout_ms. + * + * @returns the number of events served. + * + * @remark This API must only be used for queues with callbacks registered + * for all expected event types. E.g., not a message queue. + * + * @remark Also see rd_kafka_conf_set_background_event_cb() for triggering + * event callbacks from a librdkafka-managed background thread. + * + * @sa rd_kafka_conf_set_background_event_cb() + */ +RD_EXPORT +int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms); + + +/**@}*/ + + +/** + * @name Plugin interface + * + * @brief A plugin interface that allows external runtime-loaded libraries + * to integrate with a client instance without modifications to + * the application code. * * Plugins are loaded when referenced through the `plugin.library.paths` * configuration property and operates on the \c rd_kafka_conf_t @@ -5567,6 +6174,7 @@ int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms); * and not statically. Failure to do so will lead to missing symbols * or finding symbols in another librdkafka library than the * application was linked with. + * @{ */ @@ -5985,6 +6593,29 @@ typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t)( void *ic_opaque); +/** + * @brief on_broker_state_change() is called just after a broker + * has been created or its state has been changed. + * + * @param rk The client instance. + * @param broker_id The broker id (-1 is used for bootstrap brokers). + * @param secproto The security protocol. + * @param name The original name of the broker. + * @param port The port of the broker. + * @param state Broker state name. + * @param ic_opaque The interceptor's opaque pointer specified in ..add..(). + * + * @returns an error code on failure, the error is logged but otherwise ignored. + */ +typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_broker_state_change_t)( + rd_kafka_t *rk, + int32_t broker_id, + const char *secproto, + const char *name, + int port, + const char *state, + void *ic_opaque); + /** * @brief Append an on_conf_set() interceptor. @@ -5995,7 +6626,7 @@ typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t)( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. 
*/ RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set( @@ -6014,7 +6645,7 @@ RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup( @@ -6061,7 +6692,7 @@ RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy( * has not already been added. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t @@ -6081,7 +6712,7 @@ rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf, * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy( @@ -6118,7 +6749,7 @@ rd_kafka_interceptor_add_on_send(rd_kafka_t *rk, * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement( @@ -6137,7 +6768,7 @@ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume( @@ -6156,7 +6787,7 @@ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit( @@ -6175,7 +6806,7 @@ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. 
*/ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent( @@ -6194,7 +6825,7 @@ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received( @@ -6213,7 +6844,7 @@ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start( @@ -6232,7 +6863,7 @@ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start( * @param ic_opaque Opaque value that will be passed to the function. * * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT - * if an existing intercepted with the same \p ic_name and function + * if an existing interceptor with the same \p ic_name and function * has already been added to \p conf. */ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit( @@ -6242,6 +6873,26 @@ RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit( void *ic_opaque); +/** + * @brief Append an on_broker_state_change() interceptor. + * + * @param rk Client instance. + * @param ic_name Interceptor name, used in logging. + * @param on_broker_state_change() Function pointer. + * @param ic_opaque Opaque value that will be passed to the function. + * + * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT + * if an existing interceptor with the same \p ic_name and function + * has already been added to \p conf. + */ +RD_EXPORT +rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change( + rd_kafka_t *rk, + const char *ic_name, + rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change, + void *ic_opaque); + + /**@}*/ @@ -6311,6 +6962,30 @@ rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres); RD_EXPORT const rd_kafka_topic_partition_list_t * rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres); +/** + * @brief Topic Partition Result provides per-topic+partition operation result + * Consists of TopicPartition object and error object. + */ + +/** + * @returns the topic partition object from the topic partition result object. + * @remarks lifetime of the returned string is the same as the \p + * partition_result. + * The error object is set inside the topic partition object. For the + * detailed error information, use + * rd_kafka_topic_partition_result_error() + */ +RD_EXPORT const rd_kafka_topic_partition_t * +rd_kafka_topic_partition_result_partition( + const rd_kafka_topic_partition_result_t *partition_result); + +/** + * @returns the error object from the topic partition result object. + * @remarks lifetime of the returned string is the same as the \p + * partition_result. 
+ */ +RD_EXPORT const rd_kafka_error_t *rd_kafka_topic_partition_result_error( + const rd_kafka_topic_partition_result_t *partition_result); /**@}*/ @@ -6369,10 +7044,26 @@ typedef enum rd_kafka_admin_op_t { RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */ /** DeleteConsumerGroupOffsets */ RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, - RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */ - RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */ - RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */ - RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ + RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */ + RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */ + RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, /**< ListConsumerGroups */ + RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, /**< DescribeConsumerGroups */ + /** ListConsumerGroupOffsets */ + RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, + /** AlterConsumerGroupOffsets */ + RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS, + /** IncrementalAlterConfigs */ + RD_KAFKA_ADMIN_OP_INCREMENTALALTERCONFIGS, + /** DescribeUserScramCredentials */ + RD_KAFKA_ADMIN_OP_DESCRIBEUSERSCRAMCREDENTIALS, + /** AlterUserScramCredentials */ + RD_KAFKA_ADMIN_OP_ALTERUSERSCRAMCREDENTIALS, + RD_KAFKA_ADMIN_OP_DESCRIBETOPICS, /**< DescribeTopics */ + RD_KAFKA_ADMIN_OP_DESCRIBECLUSTER, /**< DescribeCluster */ + RD_KAFKA_ADMIN_OP_LISTOFFSETS, /**< ListOffsets */ + RD_KAFKA_ADMIN_OP_ELECTLEADERS, /**< ElectLeaders */ + RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */ } rd_kafka_admin_op_t; /** @@ -6389,6 +7080,18 @@ typedef enum rd_kafka_admin_op_t { typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t; +/** + * @enum rd_kafka_IsolationLevel_t + * + * @brief IsolationLevel enum name for use with rd_kafka_AdminOptions_new() + * + * @sa rd_kafka_AdminOptions_new() + */ +typedef enum rd_kafka_IsolationLevel_t { + RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED = 0, + RD_KAFKA_ISOLATION_LEVEL_READ_COMMITTED = 1 +} rd_kafka_IsolationLevel_t; + /** * @brief Create a new AdminOptions object. * @@ -6423,8 +7126,7 @@ RD_EXPORT void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options); * request transmission, operation time on broker, and response. * * @param options Admin options. - * @param timeout_ms Timeout in milliseconds, use -1 for indefinite timeout. - * Defaults to `socket.timeout.ms`. + * @param timeout_ms Timeout in milliseconds. Defaults to `socket.timeout.ms`. * @param errstr A human readable error string (nul-terminated) is written to * this location that must be of at least \p errstr_size bytes. * The \p errstr is only written in case of error. @@ -6508,6 +7210,8 @@ rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options, * the following exceptions: * - AlterConfigs with a BROKER resource are sent to the broker id set * as the resource name. + * - IncrementalAlterConfigs with a BROKER resource are sent to the broker id + * set as the resource name. * - DescribeConfigs with a BROKER resource are sent to the broker id set * as the resource name. * @@ -6533,6 +7237,85 @@ rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options, size_t errstr_size); +/** + * @brief Whether broker should return stable offsets + * (transaction-committed). + * + * @param options Admin options. + * @param true_or_false Defaults to false. + * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. 
+ * + * @remark This option is valid for ListConsumerGroupOffsets. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets( + rd_kafka_AdminOptions_t *options, + int true_or_false); + +/** + * @brief Whether broker should return authorized operations for the given + * resource in the DescribeConsumerGroups, DescribeTopics, or + * DescribeCluster calls. + * + * @param options Admin options. + * @param true_or_false Defaults to false. + * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. + * + * @remark This option is valid for DescribeConsumerGroups, DescribeTopics, + * DescribeCluster. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_AdminOptions_set_include_authorized_operations( + rd_kafka_AdminOptions_t *options, + int true_or_false); + +/** + * @brief Set consumer groups states to query for. + * + * @param options Admin options. + * @param consumer_group_states Array of consumer group states. + * @param consumer_group_states_cnt Size of the \p consumer_group_states array. + * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. + * + * @remark This option is valid for ListConsumerGroups. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states( + rd_kafka_AdminOptions_t *options, + const rd_kafka_consumer_group_state_t *consumer_group_states, + size_t consumer_group_states_cnt); + +/** + * @brief Set consumer groups types to query for. + * + * @param options Admin options. + * @param consumer_group_types Array of consumer group types. + * @param consumer_group_types_cnt Size of the \p consumer_group_types array. + * + * @return NULL on success, a new error instance that must be + * released with rd_kafka_error_destroy() in case of error. + * + * @remark This option is valid for ListConsumerGroups. + */ +RD_EXPORT +rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_types( + rd_kafka_AdminOptions_t *options, + const rd_kafka_consumer_group_type_t *consumer_group_types, + size_t consumer_group_types_cnt); + +/** + * @brief Set Isolation Level to an allowed `rd_kafka_IsolationLevel_t` value. + */ +RD_EXPORT +rd_kafka_error_t * +rd_kafka_AdminOptions_set_isolation_level(rd_kafka_AdminOptions_t *options, + rd_kafka_IsolationLevel_t value); /** * @brief Set application opaque value that can be extracted from the @@ -6544,8 +7327,39 @@ rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options, -/* - * CreateTopics - create topics in cluster. +/** + * @enum rd_kafka_AclOperation_t + * @brief Apache Kafka ACL operation types. Common type for multiple Admin API + * functions. 
+ */ +typedef enum rd_kafka_AclOperation_t { + RD_KAFKA_ACL_OPERATION_UNKNOWN = 0, /**< Unknown */ + RD_KAFKA_ACL_OPERATION_ANY = + 1, /**< In a filter, matches any AclOperation */ + RD_KAFKA_ACL_OPERATION_ALL = 2, /**< ALL operation */ + RD_KAFKA_ACL_OPERATION_READ = 3, /**< READ operation */ + RD_KAFKA_ACL_OPERATION_WRITE = 4, /**< WRITE operation */ + RD_KAFKA_ACL_OPERATION_CREATE = 5, /**< CREATE operation */ + RD_KAFKA_ACL_OPERATION_DELETE = 6, /**< DELETE operation */ + RD_KAFKA_ACL_OPERATION_ALTER = 7, /**< ALTER operation */ + RD_KAFKA_ACL_OPERATION_DESCRIBE = 8, /**< DESCRIBE operation */ + RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION = + 9, /**< CLUSTER_ACTION operation */ + RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS = + 10, /**< DESCRIBE_CONFIGS operation */ + RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS = + 11, /**< ALTER_CONFIGS operation */ + RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE = + 12, /**< IDEMPOTENT_WRITE operation */ + RD_KAFKA_ACL_OPERATION__CNT +} rd_kafka_AclOperation_t; + +/**@}*/ + +/** + * @name Admin API - Topics + * @brief Topic related operations. + * @{ * */ @@ -6759,9 +7573,12 @@ RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics( size_t *cntp); +/**@}*/ -/* - * CreatePartitions - add partitions to topic. +/** + * @name Admin API - Partitions + * @brief Partition related operations. + * @{ * */ @@ -6880,10 +7697,12 @@ rd_kafka_CreatePartitions_result_topics( const rd_kafka_CreatePartitions_result_t *result, size_t *cntp); +/**@}*/ - -/* - * Cluster, broker, topic configuration entries, sources, etc. +/** + * @name Admin API - Configuration + * @brief Cluster, broker, topic configuration entries, sources, etc. + * @{ * */ @@ -7001,12 +7820,13 @@ rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry, * @brief Apache Kafka resource types */ typedef enum rd_kafka_ResourceType_t { - RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */ - RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */ - RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */ - RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */ - RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */ - RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */ + RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */ + RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */ + RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */ + RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */ + RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */ + RD_KAFKA_RESOURCE_TRANSACTIONAL_ID = 5, /**< Transactional ID */ + RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */ } rd_kafka_ResourceType_t; /** @@ -7027,6 +7847,18 @@ typedef enum rd_kafka_ResourcePatternType_t { RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT, } rd_kafka_ResourcePatternType_t; +/** + * @enum rd_kafka_AlterConfigOpType_t + * @brief Incremental alter configs operations. + */ +typedef enum rd_kafka_AlterConfigOpType_t { + RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET = 0, + RD_KAFKA_ALTER_CONFIG_OP_TYPE_DELETE = 1, + RD_KAFKA_ALTER_CONFIG_OP_TYPE_APPEND = 2, + RD_KAFKA_ALTER_CONFIG_OP_TYPE_SUBTRACT = 3, + RD_KAFKA_ALTER_CONFIG_OP_TYPE__CNT, +} rd_kafka_AlterConfigOpType_t; + /** * @returns a string representation of the \p resource_pattern_type */ @@ -7092,6 +7924,31 @@ rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config, const char *value); +/** + * @brief Add the value of the configuration entry for a subsequent + * incremental alter config operation. APPEND and SUBTRACT are + * possible for list-type configuration entries only. 
+ * + * @param config ConfigResource to add config property to. + * @param name Configuration name, depends on resource type. + * @param op_type Operation type, one of rd_kafka_AlterConfigOpType_t. + * @param value Configuration value, depends on resource type and \p name. + * Set to \c NULL, only with \p op_type set to DELETE, + * to revert configuration value to default. + * + * @returns NULL on success, or an rd_kafka_error_t * + * with the corresponding error code and string. + * Error ownership belongs to the caller. + * Possible error codes: + * - RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input. + */ +RD_EXPORT rd_kafka_error_t *rd_kafka_ConfigResource_add_incremental_config( + rd_kafka_ConfigResource_t *config, + const char *name, + rd_kafka_AlterConfigOpType_t op_type, + const char *value); + + /** * @brief Get an array of config entries from a ConfigResource object. * @@ -7157,6 +8014,8 @@ rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config); * since these resource requests must be sent to the broker specified * in the resource. * + * @deprecated Use rd_kafka_IncrementalAlterConfigs(). + * */ RD_EXPORT void rd_kafka_AlterConfigs(rd_kafka_t *rk, @@ -7191,6 +8050,66 @@ rd_kafka_AlterConfigs_result_resources( +/* + * IncrementalAlterConfigs - alter cluster configuration incrementally. + * + */ + + +/** + * @brief Incrementally update the configuration for the specified resources. + * Updates are not transactional so they may succeed for some resources + * while failing for others. The configs for a particular resource are + * updated atomically, executing the corresponding incremental operations + * on the provided configurations. + * + * @remark Requires broker version >=2.3.0. + * + * @remark Multiple resources and resource types may be set, but at most one + * resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call + * since these resource requests must be sent to the broker specified + * in the resource. The broker option will be ignored in this case. + * + * @param rk Client instance. + * @param configs Array of config entries to alter. + * @param config_cnt Number of elements in \p configs array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + */ +RD_EXPORT +void rd_kafka_IncrementalAlterConfigs(rd_kafka_t *rk, + rd_kafka_ConfigResource_t **configs, + size_t config_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + +/* + * IncrementalAlterConfigs result type and methods + */ + +/** + * @brief Get an array of resource results from an IncrementalAlterConfigs + * result. + * + * Use \c rd_kafka_ConfigResource_error() and + * \c rd_kafka_ConfigResource_error_string() to extract per-resource error + * results on the returned array elements. + * + * The returned object life-times are the same as the \p result object. + * + * @param result Result object to get resource results from. + * @param cntp is updated to the number of elements in the array. + * + * @returns an array of ConfigResource elements, or NULL if not available. + */ +RD_EXPORT const rd_kafka_ConfigResource_t ** +rd_kafka_IncrementalAlterConfigs_result_resources( + const rd_kafka_IncrementalAlterConfigs_result_t *result, + size_t *cntp); + + + /* * DescribeConfigs - retrieve cluster configuration.
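Before the DescribeConfigs material continues, a brief sketch of the incremental flow declared above; the topic name and property are illustrative, and the result should then be polled from \c rkqu as an RD_KAFKA_EVENT_INCREMENTALALTERCONFIGS_RESULT event:

```c
/* Sketch: incrementally set one topic property (names illustrative). */
rd_kafka_ConfigResource_t *config = rd_kafka_ConfigResource_new(
    RD_KAFKA_RESOURCE_TOPIC, "my-topic");

rd_kafka_error_t *error = rd_kafka_ConfigResource_add_incremental_config(
    config, "cleanup.policy", RD_KAFKA_ALTER_CONFIG_OP_TYPE_SET, "compact");
if (!error) {
        /* NULL options: defaults; the result event lands on rkqu. */
        rd_kafka_IncrementalAlterConfigs(rk, &config, 1, NULL, rkqu);
} else {
        rd_kafka_error_destroy(error);
}

/* ...after polling rkqu and handling the result event: */
rd_kafka_ConfigResource_destroy(config);
```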
* @@ -7248,10 +8167,13 @@ rd_kafka_DescribeConfigs_result_resources( size_t *cntp); -/* - * DeleteRecords - delete records (messages) from partitions - * - * +/**@}*/ + +/** + * @name Admin API - DeleteRecords + * @brief delete records (messages) from partitions. + * @{ + * */ /**! Represents records to be deleted */ @@ -7337,8 +8259,708 @@ RD_EXPORT const rd_kafka_topic_partition_list_t * rd_kafka_DeleteRecords_result_offsets( const rd_kafka_DeleteRecords_result_t *result); -/* - * DeleteGroups - delete groups from cluster +/**@}*/ + +/** + * @name Admin API - DescribeTopics + * @{ + */ + +/** + * @brief Represents a collection of topics, to be passed to DescribeTopics. + * + */ +typedef struct rd_kafka_TopicCollection_s rd_kafka_TopicCollection_t; + +/** + * @brief TopicPartition represents a partition in the DescribeTopics result. + * + */ +typedef struct rd_kafka_TopicPartitionInfo_s rd_kafka_TopicPartitionInfo_t; + +/** + * @brief DescribeTopics result type. + * + */ +typedef struct rd_kafka_TopicDescription_s rd_kafka_TopicDescription_t; + +/** + * @brief Creates a new TopicCollection for passing to rd_kafka_DescribeTopics. + * + * @param topics A list of topics. + * @param topics_cnt Count of topics. + * + * @return a newly allocated TopicCollection object. Must be freed using + * rd_kafka_TopicCollection_destroy when done. + */ +RD_EXPORT +rd_kafka_TopicCollection_t * +rd_kafka_TopicCollection_of_topic_names(const char **topics, size_t topics_cnt); + +/** + * @brief Destroy and free a TopicCollection object created with + * rd_kafka_TopicCollection_new_* methods. + */ +RD_EXPORT void +rd_kafka_TopicCollection_destroy(rd_kafka_TopicCollection_t *topics); + +/** + * @brief Describe topics as specified by the \p topics + * array of size \p topics_cnt elements. + * + * @param rk Client instance. + * @param topics Collection of topics to describe. + * @param options Optional admin options, or NULL for defaults. + * Valid options: + * - include_authorized_operations + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DESCRIBETOPICS_RESULT + */ +RD_EXPORT +void rd_kafka_DescribeTopics(rd_kafka_t *rk, + const rd_kafka_TopicCollection_t *topics, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief Get an array of topic results from a DescribeTopics result. + * + * @param result Result to get topics results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_TopicDescription_t **rd_kafka_DescribeTopics_result_topics( + const rd_kafka_DescribeTopics_result_t *result, + size_t *cntp); + + +/** + * @brief Gets an array of partitions for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * @param cntp is updated to the number of partitions in the array. + * + * @return An array of TopicPartitionInfos. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT +const rd_kafka_TopicPartitionInfo_t **rd_kafka_TopicDescription_partitions( + const rd_kafka_TopicDescription_t *topicdesc, + size_t *cntp); + + +/** + * @brief Gets the partition id for \p partition. + * + * @param partition The partition info. + * + * @return The partition id. 
+ */ +RD_EXPORT +const int rd_kafka_TopicPartitionInfo_partition( + const rd_kafka_TopicPartitionInfo_t *partition); + + +/** + * @brief Gets the partition leader for \p partition. + * + * @param partition The partition info. + * + * @return The partition leader. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p partition object. + */ +RD_EXPORT +const rd_kafka_Node_t *rd_kafka_TopicPartitionInfo_leader( + const rd_kafka_TopicPartitionInfo_t *partition); + +/** + * @brief Gets the partition in-sync replicas for \p partition. + * + * @param partition The partition info. + * @param cntp is updated with in-sync replicas count. + * + * @return The in-sync replica nodes. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p partition object. + */ +RD_EXPORT +const rd_kafka_Node_t ** +rd_kafka_TopicPartitionInfo_isr(const rd_kafka_TopicPartitionInfo_t *partition, + size_t *cntp); + +/** + * @brief Gets the partition replicas for \p partition. + * + * @param partition The partition info. + * @param cntp is updated with partition replicas count. + * + * @return The partition replicas nodes. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p partition object. + */ +RD_EXPORT +const rd_kafka_Node_t **rd_kafka_TopicPartitionInfo_replicas( + const rd_kafka_TopicPartitionInfo_t *partition, + size_t *cntp); + +/** + * @brief Gets the topic authorized ACL operations for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * @param cntp is updated with authorized ACL operations count. + * + * @return The topic authorized operations. Is NULL if operations were not + * requested. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT +const rd_kafka_AclOperation_t *rd_kafka_TopicDescription_authorized_operations( + const rd_kafka_TopicDescription_t *topicdesc, + size_t *cntp); + +/** + * @brief Gets the topic name for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * + * @return The topic name. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT +const char * +rd_kafka_TopicDescription_name(const rd_kafka_TopicDescription_t *topicdesc); + +/** + * @brief Gets the topic id for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * @return The topic id + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT const rd_kafka_Uuid_t *rd_kafka_TopicDescription_topic_id( + const rd_kafka_TopicDescription_t *topicdesc); + +/** + * @brief Gets if the \p topicdesc topic is internal. + * + * @param topicdesc The topic description. + * + * @return 1 if the topic is internal to Kafka, 0 otherwise. + */ +RD_EXPORT +int rd_kafka_TopicDescription_is_internal( + const rd_kafka_TopicDescription_t *topicdesc); + +/** + * @brief Gets the error for the \p topicdesc topic. + * + * @param topicdesc The topic description. + * + * @return The topic description error. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p topicdesc object. + */ +RD_EXPORT +const rd_kafka_error_t * +rd_kafka_TopicDescription_error(const rd_kafka_TopicDescription_t *topicdesc); + + +/**@}*/ + +/** + * @name Admin API - DescribeCluster + * @{ + */ + +/** + * @brief Describes the cluster. 
+ * + * @param rk Client instance. + * @param options Optional admin options, or NULL for defaults. + * Valid options: + * - include_authorized_operations + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DESCRIBECLUSTER_RESULT + */ +RD_EXPORT +void rd_kafka_DescribeCluster(rd_kafka_t *rk, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief Gets the broker nodes for the \p result cluster. + * + * @param result The result of DescribeCluster. + * @param cntp is updated with the count of broker nodes. + * + * @return An array of broker nodes. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_Node_t **rd_kafka_DescribeCluster_result_nodes( + const rd_kafka_DescribeCluster_result_t *result, + size_t *cntp); + +/** + * @brief Gets the authorized ACL operations for the \p result cluster. + * + * @param result The result of DescribeCluster. + * @param cntp is updated with authorized ACL operations count. + * + * @return The cluster authorized operations. Is NULL if operations were not + * requested. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_AclOperation_t * +rd_kafka_DescribeCluster_result_authorized_operations( + const rd_kafka_DescribeCluster_result_t *result, + size_t *cntp); + +/** + * @brief Gets the current controller for the \p result cluster. + * + * @param result The result of DescribeCluster. + * + * @return The cluster current controller. + */ +RD_EXPORT +const rd_kafka_Node_t *rd_kafka_DescribeCluster_result_controller( + const rd_kafka_DescribeCluster_result_t *result); + +/** + * @brief Gets the cluster id for the \p result cluster. + * + * @param result The result of DescribeCluster. + * + * @return The cluster id. + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const char *rd_kafka_DescribeCluster_result_cluster_id( + const rd_kafka_DescribeCluster_result_t *result); + +/**@}*/ + + +/** + * @name Admin API - ListConsumerGroups + * @{ + */ + + +/** + * @brief ListConsumerGroups result for a single group + */ + +/**! ListConsumerGroups result for a single group */ +typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t; + +/**! ListConsumerGroups results and errors */ +typedef struct rd_kafka_ListConsumerGroupsResult_s + rd_kafka_ListConsumerGroupsResult_t; + +/** + * @brief List the consumer groups available in the cluster. + * + * @param rk Client instance. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT + */ +RD_EXPORT +void rd_kafka_ListConsumerGroups(rd_kafka_t *rk, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief Gets the group id for the \p grplist group. + * + * @param grplist The group listing. + * + * @return The group id. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grplist object. + */ +RD_EXPORT +const char *rd_kafka_ConsumerGroupListing_group_id( + const rd_kafka_ConsumerGroupListing_t *grplist); + +/** + * @brief Is the \p grplist group a simple consumer group. + * + * @param grplist The group listing. 
+ * + * @return 1 if the group is a simple consumer group, + * else 0. + */ +RD_EXPORT +int rd_kafka_ConsumerGroupListing_is_simple_consumer_group( + const rd_kafka_ConsumerGroupListing_t *grplist); + +/** + * @brief Gets state for the \p grplist group. + * + * @param grplist The group listing. + * + * @return A group state. + */ +RD_EXPORT +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state( + const rd_kafka_ConsumerGroupListing_t *grplist); + +/** + * @brief Gets type for the \p grplist group. + * + * @param grplist The group listing. + * + * @return A group type. + */ +RD_EXPORT +rd_kafka_consumer_group_type_t rd_kafka_ConsumerGroupListing_type( + const rd_kafka_ConsumerGroupListing_t *grplist); + +/** + * @brief Get an array of valid list groups from a ListConsumerGroups result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_ConsumerGroupListing_t ** +rd_kafka_ListConsumerGroups_result_valid( + const rd_kafka_ListConsumerGroups_result_t *result, + size_t *cntp); + +/** + * @brief Get an array of errors from a ListConsumerGroups call result. + * + * The returned errors life-time is the same as the \p result object. + * + * @param result ListConsumerGroups result. + * @param cntp Is updated to the number of elements in the array. + * + * @return Array of errors in \p result. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. + */ +RD_EXPORT +const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors( + const rd_kafka_ListConsumerGroups_result_t *result, + size_t *cntp); + +/**@}*/ + +/** + * @name Admin API - DescribeConsumerGroups + * @{ + */ + +/** + * @brief DescribeConsumerGroups result type. + * + */ +typedef struct rd_kafka_ConsumerGroupDescription_s + rd_kafka_ConsumerGroupDescription_t; + +/** + * @brief Member description included in ConsumerGroupDescription. + * + */ +typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t; + +/** + * @brief Member assignment included in MemberDescription. + * + */ +typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t; + +/** + * @brief Describe groups from cluster as specified by the \p groups + * array of size \p groups_cnt elements. + * + * @param rk Client instance. + * @param groups Array of groups to describe. + * @param groups_cnt Number of elements in \p groups array. + * @param options Optional admin options, or NULL for defaults. + * Valid options: + * - include_authorized_operations + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT + */ +RD_EXPORT +void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk, + const char **groups, + size_t groups_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/** + * @brief Get an array of group results from a DescribeConsumerGroups result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. 
+ */ +RD_EXPORT +const rd_kafka_ConsumerGroupDescription_t ** +rd_kafka_DescribeConsumerGroups_result_groups( + const rd_kafka_DescribeConsumerGroups_result_t *result, + size_t *cntp); + + +/** + * @brief Gets the group id for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The group id. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const char *rd_kafka_ConsumerGroupDescription_group_id( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the error for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The group description error. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Is the \p grpdesc group a simple consumer group. + * + * @param grpdesc The group description. + * @return 1 if the group is a simple consumer group, + * else 0. + */ +RD_EXPORT +int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + + +/** + * @brief Gets the partition assignor for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The partition assignor. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const char *rd_kafka_ConsumerGroupDescription_partition_assignor( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the authorized ACL operations for the \p grpdesc group. + * + * @param grpdesc The group description. + * @param cntp is updated with authorized ACL operations count. + * + * @return The group authorized operations. Is NULL if operations were not + * requested. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const rd_kafka_AclOperation_t * +rd_kafka_ConsumerGroupDescription_authorized_operations( + const rd_kafka_ConsumerGroupDescription_t *grpdesc, + size_t *cntp); + +/** + * @brief Gets state for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return A group state. + */ +RD_EXPORT +rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the coordinator for the \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The group coordinator. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. + */ +RD_EXPORT +const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets the members count of \p grpdesc group. + * + * @param grpdesc The group description. + * + * @return The member count. + */ +RD_EXPORT +size_t rd_kafka_ConsumerGroupDescription_member_count( + const rd_kafka_ConsumerGroupDescription_t *grpdesc); + +/** + * @brief Gets a member of \p grpdesc group. + * + * @param grpdesc The group description. + * @param idx The member idx. + * + * @return A member at index \p idx, or NULL if + * \p idx is out of range. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p grpdesc object. 
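+ *
+ * Example (illustrative sketch, not part of the original header):
+ *   size_t i, n = rd_kafka_ConsumerGroupDescription_member_count(grpdesc);
+ *   for (i = 0; i < n; i++) {
+ *           const rd_kafka_MemberDescription_t *m =
+ *               rd_kafka_ConsumerGroupDescription_member(grpdesc, i);
+ *           printf("%s\n", rd_kafka_MemberDescription_client_id(m));
+ *   }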
+ */
+RD_EXPORT
+const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(
+    const rd_kafka_ConsumerGroupDescription_t *grpdesc,
+    size_t idx);
+
+/**
+ * @brief Gets client id of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The client id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *rd_kafka_MemberDescription_client_id(
+    const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets group instance id of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The group instance id, or NULL if not available.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *rd_kafka_MemberDescription_group_instance_id(
+    const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets consumer id of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The consumer id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *rd_kafka_MemberDescription_consumer_id(
+    const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets host of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The host.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *
+rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets assignment of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The member assignment.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(
+    const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets assigned partitions of a member \p assignment.
+ *
+ * @param assignment The group member assignment.
+ *
+ * @return The assigned partitions.
+ *
+ * @remark The lifetime of the returned memory is the same
+ *         as the lifetime of the \p assignment object.
+ */
+RD_EXPORT
+const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(
+    const rd_kafka_MemberAssignment_t *assignment);
+
+/**@}*/
+
+/**
+ * @name Admin API - DeleteGroups
+ * @brief Delete groups from cluster
+ * @{
  *
  *
  */
@@ -7355,13 +8977,15 @@ typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
  * @returns a new allocated DeleteGroup object.
  *          Use rd_kafka_DeleteGroup_destroy() to free object when done.
  */
-RD_EXPORT rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
+RD_EXPORT
+rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
 
 /**
  * @brief Destroy and free a DeleteGroup object previously created with
  *        rd_kafka_DeleteGroup_new()
  */
-RD_EXPORT void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
+RD_EXPORT
+void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
 
 /**
  * @brief Helper function to destroy all DeleteGroup objects in
@@ -7384,6 +9008,8 @@ rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups,
  *
  * @remark The result event type emitted on the supplied queue is of type
  *         \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT
+ *
+ * @remark This function is called deleteConsumerGroups in the Java client.
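+ *
+ * Example (illustrative sketch, not part of the original header; \c rk and
+ * \c rkqu are assumed to exist):
+ *   rd_kafka_DeleteGroup_t *del_groups[1];
+ *   del_groups[0] = rd_kafka_DeleteGroup_new("my-group");
+ *   rd_kafka_DeleteGroups(rk, del_groups, 1, NULL, rkqu);
+ *   rd_kafka_DeleteGroup_destroy_array(del_groups, 1);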
*/ RD_EXPORT void rd_kafka_DeleteGroups(rd_kafka_t *rk, @@ -7406,13 +9032,206 @@ void rd_kafka_DeleteGroups(rd_kafka_t *rk, * @param result Result to get group results from. * @param cntp is updated to the number of elements in the array. */ -RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups( - const rd_kafka_DeleteGroups_result_t *result, +RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups( + const rd_kafka_DeleteGroups_result_t *result, + size_t *cntp); + +/**@}*/ + +/** + * @name Admin API - ListConsumerGroupOffsets + * @{ + * + * + */ + +/*! Represents consumer group committed offsets to be listed. */ +typedef struct rd_kafka_ListConsumerGroupOffsets_s + rd_kafka_ListConsumerGroupOffsets_t; + +/** + * @brief Create a new ListConsumerGroupOffsets object. + * This object is later passed to rd_kafka_ListConsumerGroupOffsets(). + * + * @param group_id Consumer group id. + * @param partitions Partitions to list committed offsets for. + * Only the topic and partition fields are used. + * + * @returns a new allocated ListConsumerGroupOffsets object. + * Use rd_kafka_ListConsumerGroupOffsets_destroy() to free + * object when done. + */ +RD_EXPORT rd_kafka_ListConsumerGroupOffsets_t * +rd_kafka_ListConsumerGroupOffsets_new( + const char *group_id, + const rd_kafka_topic_partition_list_t *partitions); + +/** + * @brief Destroy and free a ListConsumerGroupOffsets object previously + * created with rd_kafka_ListConsumerGroupOffsets_new() + */ +RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy( + rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets); + +/** + * @brief Helper function to destroy all ListConsumerGroupOffsets objects in + * the \p list_grpoffsets array (of \p list_grpoffsets_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy_array( + rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, + size_t list_grpoffset_cnt); + +/** + * @brief List committed offsets for a set of partitions in a consumer + * group. + * + * @param rk Client instance. + * @param list_grpoffsets Array of group committed offsets to list. + * MUST only be one single element. + * @param list_grpoffsets_cnt Number of elements in \p list_grpoffsets array. + * MUST always be 1. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT + * + * @remark The current implementation only supports one group per invocation. + */ +RD_EXPORT +void rd_kafka_ListConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets, + size_t list_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * ListConsumerGroupOffsets result type and methods + */ + +/** + * @brief Get an array of results from a ListConsumerGroupOffsets result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. 
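+ *
+ * Example (illustrative sketch, not part of the original header):
+ *   size_t cnt;
+ *   const rd_kafka_group_result_t **groups =
+ *       rd_kafka_ListConsumerGroupOffsets_result_groups(result, &cnt);
+ *   if (cnt == 1) {
+ *           const rd_kafka_topic_partition_list_t *offsets =
+ *               rd_kafka_group_result_partitions(groups[0]);
+ *           // offsets->elems[i].offset holds each committed offset
+ *   }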
+ */ +RD_EXPORT const rd_kafka_group_result_t ** +rd_kafka_ListConsumerGroupOffsets_result_groups( + const rd_kafka_ListConsumerGroupOffsets_result_t *result, + size_t *cntp); + + + +/**@}*/ + +/** + * @name Admin API - AlterConsumerGroupOffsets + * @{ + * + * + */ + +/*! Represents consumer group committed offsets to be altered. */ +typedef struct rd_kafka_AlterConsumerGroupOffsets_s + rd_kafka_AlterConsumerGroupOffsets_t; + +/** + * @brief Create a new AlterConsumerGroupOffsets object. + * This object is later passed to rd_kafka_AlterConsumerGroupOffsets(). + * + * @param group_id Consumer group id. + * @param partitions Partitions to alter committed offsets for. + * Only the topic and partition fields are used. + * + * @returns a new allocated AlterConsumerGroupOffsets object. + * Use rd_kafka_AlterConsumerGroupOffsets_destroy() to free + * object when done. + */ +RD_EXPORT rd_kafka_AlterConsumerGroupOffsets_t * +rd_kafka_AlterConsumerGroupOffsets_new( + const char *group_id, + const rd_kafka_topic_partition_list_t *partitions); + +/** + * @brief Destroy and free a AlterConsumerGroupOffsets object previously + * created with rd_kafka_AlterConsumerGroupOffsets_new() + */ +RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy( + rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets); + +/** + * @brief Helper function to destroy all AlterConsumerGroupOffsets objects in + * the \p alter_grpoffsets array (of \p alter_grpoffsets_cnt elements). + * The array itself is not freed. + */ +RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy_array( + rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, + size_t alter_grpoffset_cnt); + +/** + * @brief Alter committed offsets for a set of partitions in a consumer + * group. This will succeed at the partition level only if the group + * is not actively subscribed to the corresponding topic. + * + * @param rk Client instance. + * @param alter_grpoffsets Array of group committed offsets to alter. + * MUST only be one single element. + * @param alter_grpoffsets_cnt Number of elements in \p alter_grpoffsets array. + * MUST always be 1. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. + * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT + * + * @remark The current implementation only supports one group per invocation. + */ +RD_EXPORT +void rd_kafka_AlterConsumerGroupOffsets( + rd_kafka_t *rk, + rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets, + size_t alter_grpoffsets_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + + + +/* + * AlterConsumerGroupOffsets result type and methods + */ + +/** + * @brief Get an array of results from a AlterConsumerGroupOffsets result. + * + * The returned groups life-time is the same as the \p result object. + * + * @param result Result to get group results from. + * @param cntp is updated to the number of elements in the array. + * + * @remark The lifetime of the returned memory is the same + * as the lifetime of the \p result object. 
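+ *
+ * Example of issuing the request whose result this accessor reads
+ * (illustrative sketch, not part of the original header):
+ *   rd_kafka_topic_partition_list_t *parts =
+ *       rd_kafka_topic_partition_list_new(1);
+ *   rd_kafka_topic_partition_list_add(parts, "my-topic", 0)->offset = 42;
+ *   rd_kafka_AlterConsumerGroupOffsets_t *alter[1];
+ *   alter[0] = rd_kafka_AlterConsumerGroupOffsets_new("my-group", parts);
+ *   rd_kafka_AlterConsumerGroupOffsets(rk, alter, 1, NULL, rkqu);
+ *   rd_kafka_AlterConsumerGroupOffsets_destroy_array(alter, 1);
+ *   rd_kafka_topic_partition_list_destroy(parts);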
+ */ +RD_EXPORT const rd_kafka_group_result_t ** +rd_kafka_AlterConsumerGroupOffsets_result_groups( + const rd_kafka_AlterConsumerGroupOffsets_result_t *result, size_t *cntp); -/* - * DeleteConsumerGroupOffsets - delete groups from cluster + +/**@}*/ + +/** + * @name Admin API - DeleteConsumerGroupOffsets + * @{ * * */ @@ -7455,7 +9274,7 @@ RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy_array( size_t del_grpoffset_cnt); /** - * @brief Delete committed offsets for a set of partitions in a conusmer + * @brief Delete committed offsets for a set of partitions in a consumer * group. This will succeed at the partition level only if the group * is not actively subscribed to the corresponding topic. * @@ -7499,70 +9318,364 @@ rd_kafka_DeleteConsumerGroupOffsets_result_groups( const rd_kafka_DeleteConsumerGroupOffsets_result_t *result, size_t *cntp); +/**@}*/ + /** - * @brief ACL Binding is used to create access control lists. + * @name Admin API - ListOffsets + * @brief Given a topic_partition list, provides the offset information. + * @{ + */ + +/** + * @enum rd_kafka_OffsetSpec_t + * @brief Allows to specify the desired offsets when using ListOffsets. + */ +typedef enum rd_kafka_OffsetSpec_t { + /* Used to retrieve the offset with the largest timestamp of a partition + * as message timestamps can be specified client side this may not match + * the log end offset returned by SPEC_LATEST. + */ + RD_KAFKA_OFFSET_SPEC_MAX_TIMESTAMP = -3, + /* Used to retrieve the offset with the earliest timestamp of a + partition. */ + RD_KAFKA_OFFSET_SPEC_EARLIEST = -2, + /* Used to retrieve the offset with the latest timestamp of a partition. + */ + RD_KAFKA_OFFSET_SPEC_LATEST = -1, +} rd_kafka_OffsetSpec_t; + +/** + * @brief Information returned from a ListOffsets call for a specific + * `rd_kafka_topic_partition_t`. + */ +typedef struct rd_kafka_ListOffsetsResultInfo_s + rd_kafka_ListOffsetsResultInfo_t; + +/** + * @brief Returns the topic partition of the passed \p result_info. + */ +RD_EXPORT +const rd_kafka_topic_partition_t * +rd_kafka_ListOffsetsResultInfo_topic_partition( + const rd_kafka_ListOffsetsResultInfo_t *result_info); + +/** + * @brief Returns the timestamp corresponding to the offset in \p result_info. + */ +RD_EXPORT +int64_t rd_kafka_ListOffsetsResultInfo_timestamp( + const rd_kafka_ListOffsetsResultInfo_t *result_info); + +/** + * @brief Returns the array of ListOffsetsResultInfo in \p result + * and populates the size of the array in \p cntp. + */ +RD_EXPORT +const rd_kafka_ListOffsetsResultInfo_t ** +rd_kafka_ListOffsets_result_infos(const rd_kafka_ListOffsets_result_t *result, + size_t *cntp); + +/** + * @brief List offsets for the specified \p topic_partitions. + * This operation enables to find the beginning offset, + * end offset as well as the offset matching a timestamp in partitions + * or the offset with max timestamp. + * + * @param rk Client instance. + * @param topic_partitions topic_partition_list_t with the partitions and + * offsets to list. Each topic partition offset can be + * a value of the `rd_kafka_OffsetSpec_t` enum or + * a non-negative value, representing a timestamp, + * to query for the first offset after the + * given timestamp. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. 
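+ *
+ * Example (illustrative sketch, not part of the original header):
+ *   rd_kafka_topic_partition_list_t *parts =
+ *       rd_kafka_topic_partition_list_new(1);
+ *   rd_kafka_topic_partition_list_add(parts, "my-topic", 0)->offset =
+ *       RD_KAFKA_OFFSET_SPEC_EARLIEST;
+ *   rd_kafka_ListOffsets(rk, parts, NULL, rkqu);
+ *   rd_kafka_topic_partition_list_destroy(parts);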
* + * Supported admin options: + * - rd_kafka_AdminOptions_set_isolation_level() - default \c + * RD_KAFKA_ISOLATION_LEVEL_READ_UNCOMMITTED + * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms * + * @remark The result event type emitted on the supplied queue is of type + * \c RD_KAFKA_EVENT_LISTOFFSETS_RESULT */ -typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t; +RD_EXPORT +void rd_kafka_ListOffsets(rd_kafka_t *rk, + rd_kafka_topic_partition_list_t *topic_partitions, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); + +/**@}*/ /** - * @brief ACL Binding filter is used to filter access control lists. + * @name Admin API - User SCRAM credentials + * @{ + */ + +/** + * @enum rd_kafka_ScramMechanism_t + * @brief Apache Kafka ScramMechanism values. + */ +typedef enum rd_kafka_ScramMechanism_t { + RD_KAFKA_SCRAM_MECHANISM_UNKNOWN = 0, + RD_KAFKA_SCRAM_MECHANISM_SHA_256 = 1, + RD_KAFKA_SCRAM_MECHANISM_SHA_512 = 2, + RD_KAFKA_SCRAM_MECHANISM__CNT +} rd_kafka_ScramMechanism_t; + +/** + * @brief Scram credential info. + * Mechanism and iterations for a SASL/SCRAM + * credential associated with a user. + */ +typedef struct rd_kafka_ScramCredentialInfo_s rd_kafka_ScramCredentialInfo_t; + +/** + * @brief Returns the mechanism of a given ScramCredentialInfo. + */ +RD_EXPORT +rd_kafka_ScramMechanism_t rd_kafka_ScramCredentialInfo_mechanism( + const rd_kafka_ScramCredentialInfo_t *scram_credential_info); + +/** + * @brief Returns the iterations of a given ScramCredentialInfo. + */ +RD_EXPORT +int32_t rd_kafka_ScramCredentialInfo_iterations( + const rd_kafka_ScramCredentialInfo_t *scram_credential_info); + +/** + * @brief Representation of all SASL/SCRAM credentials associated + * with a user that can be retrieved, + * or an error indicating why credentials + * could not be retrieved. + */ +typedef struct rd_kafka_UserScramCredentialsDescription_s + rd_kafka_UserScramCredentialsDescription_t; + +/** + * @brief Returns the username of a UserScramCredentialsDescription. + */ +RD_EXPORT +const char *rd_kafka_UserScramCredentialsDescription_user( + const rd_kafka_UserScramCredentialsDescription_t *description); + +/** + * @brief Returns the error associated with a UserScramCredentialsDescription. + */ +RD_EXPORT +const rd_kafka_error_t *rd_kafka_UserScramCredentialsDescription_error( + const rd_kafka_UserScramCredentialsDescription_t *description); + +/** + * @brief Returns the count of ScramCredentialInfos of a + * UserScramCredentialsDescription. + */ +RD_EXPORT +size_t rd_kafka_UserScramCredentialsDescription_scramcredentialinfo_count( + const rd_kafka_UserScramCredentialsDescription_t *description); + +/** + * @brief Returns the ScramCredentialInfo at index idx of + * UserScramCredentialsDescription. + */ +RD_EXPORT +const rd_kafka_ScramCredentialInfo_t * +rd_kafka_UserScramCredentialsDescription_scramcredentialinfo( + const rd_kafka_UserScramCredentialsDescription_t *description, + size_t idx); + +/** + * @brief Get an array of descriptions from a DescribeUserScramCredentials + * result. + * + * The returned value life-time is the same as the \p result object. * + * @param result Result to get descriptions from. + * @param cntp is updated to the number of elements in the array. 
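+ *
+ * Example (illustrative sketch, not part of the original header):
+ *   size_t cnt, i;
+ *   const rd_kafka_UserScramCredentialsDescription_t **descs =
+ *       rd_kafka_DescribeUserScramCredentials_result_descriptions(result,
+ *                                                                 &cnt);
+ *   for (i = 0; i < cnt; i++)
+ *           printf("%s\n",
+ *                  rd_kafka_UserScramCredentialsDescription_user(descs[i]));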
*/ -typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t; +RD_EXPORT +const rd_kafka_UserScramCredentialsDescription_t ** +rd_kafka_DescribeUserScramCredentials_result_descriptions( + const rd_kafka_DescribeUserScramCredentials_result_t *result, + size_t *cntp); /** - * @returns the error object for the given acl result, or NULL on success. + * @brief Describe SASL/SCRAM credentials. + * This operation is supported by brokers with version 2.7.0 or higher. + * + * @param rk Client instance. + * @param users The users for which credentials are to be described. + * All users' credentials are described if NULL. + * @param user_cnt Number of elements in \p users array. + * @param options Optional admin options, or NULL for defaults. + * @param rkqu Queue to emit result on. */ -RD_EXPORT const rd_kafka_error_t * -rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres); +RD_EXPORT +void rd_kafka_DescribeUserScramCredentials( + rd_kafka_t *rk, + const char **users, + size_t user_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); +/** + * @brief A request to alter a user's SASL/SCRAM credentials. + */ +typedef struct rd_kafka_UserScramCredentialAlteration_s + rd_kafka_UserScramCredentialAlteration_t; /** - * @name AclOperation - * @{ + * @brief Allocates a new UserScramCredentialUpsertion given its fields. + * If salt isn't given a 64 B salt is generated using OpenSSL + * RAND_priv_bytes, if available. + * + * @param username The username (not empty). + * @param mechanism SASL/SCRAM mechanism. + * @param iterations SASL/SCRAM iterations. + * @param password Password bytes (not empty). + * @param password_size Size of \p password (greater than 0). + * @param salt Salt bytes (optional). + * @param salt_size Size of \p salt (optional). + * + * @remark A random salt is generated, when NULL, only if OpenSSL >= 1.1.1. + * Otherwise it's a required param. + * + * @return A newly created instance of rd_kafka_UserScramCredentialAlteration_t. + * Ownership belongs to the caller, use + * rd_kafka_UserScramCredentialAlteration_destroy to destroy. */ +RD_EXPORT +rd_kafka_UserScramCredentialAlteration_t * +rd_kafka_UserScramCredentialUpsertion_new(const char *username, + rd_kafka_ScramMechanism_t mechanism, + int32_t iterations, + const unsigned char *password, + size_t password_size, + const unsigned char *salt, + size_t salt_size); /** - * @enum rd_kafka_AclOperation_t - * @brief Apache Kafka ACL operation types. + * @brief Allocates a new UserScramCredentialDeletion given its fields. + * + * @param username The username (not empty). + * @param mechanism SASL/SCRAM mechanism. + * @return A newly created instance of rd_kafka_UserScramCredentialAlteration_t. + * Ownership belongs to the caller, use + * rd_kafka_UserScramCredentialAlteration_destroy to destroy. 
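+ *
+ * Example (illustrative sketch, not part of the original header):
+ *   rd_kafka_UserScramCredentialAlteration_t *alt =
+ *       rd_kafka_UserScramCredentialDeletion_new(
+ *           "alice", RD_KAFKA_SCRAM_MECHANISM_SHA_256);
+ *   // pass &alt to rd_kafka_AlterUserScramCredentials(), then destroy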
 */
-typedef enum rd_kafka_AclOperation_t {
-        RD_KAFKA_ACL_OPERATION_UNKNOWN = 0, /**< Unknown */
-        RD_KAFKA_ACL_OPERATION_ANY =
-            1, /**< In a filter, matches any AclOperation */
-        RD_KAFKA_ACL_OPERATION_ALL    = 2, /**< ALL operation */
-        RD_KAFKA_ACL_OPERATION_READ   = 3, /**< READ operation */
-        RD_KAFKA_ACL_OPERATION_WRITE  = 4, /**< WRITE operation */
-        RD_KAFKA_ACL_OPERATION_CREATE = 5, /**< CREATE operation */
-        RD_KAFKA_ACL_OPERATION_DELETE = 6, /**< DELETE operation */
-        RD_KAFKA_ACL_OPERATION_ALTER  = 7, /**< ALTER operation */
-        RD_KAFKA_ACL_OPERATION_DESCRIBE = 8, /**< DESCRIBE operation */
-        RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION =
-            9, /**< CLUSTER_ACTION operation */
-        RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS =
-            10, /**< DESCRIBE_CONFIGS operation */
-        RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS =
-            11, /**< ALTER_CONFIGS operation */
-        RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE =
-            12, /**< IDEMPOTENT_WRITE operation */
-        RD_KAFKA_ACL_OPERATION__CNT
-} rd_kafka_AclOperation_t;
+RD_EXPORT
+rd_kafka_UserScramCredentialAlteration_t *
+rd_kafka_UserScramCredentialDeletion_new(const char *username,
+                                         rd_kafka_ScramMechanism_t mechanism);
+
 
 /**
- * @returns a string representation of the \p acl_operation
+ * @brief Destroys a UserScramCredentialAlteration given its pointer
  */
-RD_EXPORT const char *
-rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
+RD_EXPORT
+void rd_kafka_UserScramCredentialAlteration_destroy(
+    rd_kafka_UserScramCredentialAlteration_t *alteration);
+
+/**
+ * @brief Destroys an array of UserScramCredentialAlteration
+ */
+RD_EXPORT
+void rd_kafka_UserScramCredentialAlteration_destroy_array(
+    rd_kafka_UserScramCredentialAlteration_t **alterations,
+    size_t alteration_cnt);
+
+/**
+ * @brief Result of a single user SCRAM alteration.
+ */
+typedef struct rd_kafka_AlterUserScramCredentials_result_response_s
+    rd_kafka_AlterUserScramCredentials_result_response_t;
+
+/**
+ * @brief Returns the username for a
+ *        rd_kafka_AlterUserScramCredentials_result_response.
+ */
+RD_EXPORT
+const char *rd_kafka_AlterUserScramCredentials_result_response_user(
+    const rd_kafka_AlterUserScramCredentials_result_response_t *response);
+
+/**
+ * @brief Returns the error of a
+ *        rd_kafka_AlterUserScramCredentials_result_response.
+ */
+RD_EXPORT
+const rd_kafka_error_t *
+rd_kafka_AlterUserScramCredentials_result_response_error(
+    const rd_kafka_AlterUserScramCredentials_result_response_t *response);
+
+/**
+ * @brief Get an array of responses from an AlterUserScramCredentials result.
+ *
+ * The returned value life-time is the same as the \p result object.
+ *
+ * @param result Result to get responses from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT
+const rd_kafka_AlterUserScramCredentials_result_response_t **
+rd_kafka_AlterUserScramCredentials_result_responses(
+    const rd_kafka_AlterUserScramCredentials_result_t *result,
+    size_t *cntp);
+
+/**
+ * @brief Alter SASL/SCRAM credentials.
+ *        This operation is supported by brokers with version 2.7.0 or higher.
+ *
+ * @remark For upsertions to be processed, librdkafka must be built with
+ *         OpenSSL support. It's needed to calculate the HMAC.
+ *
+ * @param rk Client instance.
+ * @param alterations The alterations to be applied.
+ * @param alteration_cnt Number of elements in \p alterations array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
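+ *
+ * Example (illustrative sketch, not part of the original header):
+ *   rd_kafka_UserScramCredentialAlteration_t *alterations[1];
+ *   alterations[0] = rd_kafka_UserScramCredentialUpsertion_new(
+ *       "alice", RD_KAFKA_SCRAM_MECHANISM_SHA_256, 8192,
+ *       (const unsigned char *)"secret", 6, NULL, 0);
+ *   rd_kafka_AlterUserScramCredentials(rk, alterations, 1, NULL, rkqu);
+ *   rd_kafka_UserScramCredentialAlteration_destroy_array(alterations, 1);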
+ */ +RD_EXPORT +void rd_kafka_AlterUserScramCredentials( + rd_kafka_t *rk, + rd_kafka_UserScramCredentialAlteration_t **alterations, + size_t alteration_cnt, + const rd_kafka_AdminOptions_t *options, + rd_kafka_queue_t *rkqu); /**@}*/ /** - * @name AclPermissionType + * @name Admin API - ACL operations * @{ */ +/** + * @brief ACL Binding is used to create access control lists. + * + * + */ +typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t; + +/** + * @brief ACL Binding filter is used to filter access control lists. + * + */ +typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t; + +/** + * @returns the error object for the given acl result, or NULL on success. + */ +RD_EXPORT const rd_kafka_error_t * +rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres); + + +/** + * @returns a string representation of the \p acl_operation + */ +RD_EXPORT const char * +rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation); + /** * @enum rd_kafka_AclPermissionType_t * @brief Apache Kafka ACL permission types. @@ -7582,8 +9695,6 @@ typedef enum rd_kafka_AclPermissionType_t { RD_EXPORT const char *rd_kafka_AclPermissionType_name( rd_kafka_AclPermissionType_t acl_permission_type); -/**@}*/ - /** * @brief Create a new AclBinding object. This object is later passed to * rd_kafka_CreateAcls(). @@ -7754,7 +9865,7 @@ RD_EXPORT void rd_kafka_CreateAcls(rd_kafka_t *rk, rd_kafka_queue_t *rkqu); /** - * @section DescribeAcls - describe access control lists. + * DescribeAcls - describe access control lists. * * */ @@ -7790,7 +9901,7 @@ RD_EXPORT void rd_kafka_DescribeAcls(rd_kafka_t *rk, rd_kafka_queue_t *rkqu); /** - * @section DeleteAcls - delete access control lists. + * DeleteAcls - delete access control lists. * * */ @@ -7852,6 +9963,100 @@ RD_EXPORT void rd_kafka_DeleteAcls(rd_kafka_t *rk, /**@}*/ +/** + * @name Admin API - Elect Leaders + * @{ + * + * + * + */ + +/** + * @brief Represents elect leaders request. + */ +typedef struct rd_kafka_ElectLeaders_s rd_kafka_ElectLeaders_t; + +/** + * @enum rd_kafka_ElectionType_t + * @brief Apache Kafka Election Types + */ +typedef enum rd_kafka_ElectionType_t { + RD_KAFKA_ELECTION_TYPE_PREFERRED = 0, /**< Preferred Replica Election */ + RD_KAFKA_ELECTION_TYPE_UNCLEAN = 1, /**< Unclean Election */ +} rd_kafka_ElectionType_t; + +/** + * @brief Create a new rd_kafka_ElectLeaders_t object. This object is later + * passed to rd_kafka_ElectLeaders(). + * + * @param election_type The election type that needs to be performed, + * preferred or unclean. + * @param partitions The topic partitions for which the leader election + * needs to be performed. + * + * @returns a new allocated elect leaders object or returns NULL in case + * of invalid election_type. + * Use rd_kafka_ElectLeaders_destroy() to free object when done. + */ +RD_EXPORT rd_kafka_ElectLeaders_t * +rd_kafka_ElectLeaders_new(rd_kafka_ElectionType_t election_type, + rd_kafka_topic_partition_list_t *partitions); + +/** + * @brief Destroy and free a rd_kafka_ElectLeaders_t object previously created + * with rd_kafka_ElectLeaders_new() + * + * @param elect_leaders The rd_kafka_ElectLeaders_t object to be destroyed. + */ +RD_EXPORT void +rd_kafka_ElectLeaders_destroy(rd_kafka_ElectLeaders_t *elect_leaders); + +/** + * @brief Elect Leaders for the provided Topic Partitions + * according to the specified election type. + * + * @param rk Client instance. + * @param elect_leaders The elect leaders request containing + * election type and partitions information. 
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ *  - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds.
+ *    Controls how long the brokers will wait for the election to complete.
+ *  - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms.
+ *    Controls how long \c rdkafka will wait for the request to complete.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ *         \c RD_KAFKA_EVENT_ELECTLEADERS_RESULT
+ * @remark If \p partitions is NULL, the broker will attempt leader
+ *         election for all partitions, but the results will contain only
+ *         partitions for which there was an election or that resulted in
+ *         an error.
+ */
+RD_EXPORT void rd_kafka_ElectLeaders(rd_kafka_t *rk,
+                                     rd_kafka_ElectLeaders_t *elect_leaders,
+                                     const rd_kafka_AdminOptions_t *options,
+                                     rd_kafka_queue_t *rkqu);
+
+/**
+ * @brief Get the array of topic partition result objects from the
+ *        elect leaders result event and populate the size of the
+ *        array in \p cntp.
+ *
+ * @param result The elect leaders result.
+ * @param cntp The number of elements in the array.
+ *
+ * @returns the array of topic partition result objects from the
+ *          elect leaders result event.
+ */
+RD_EXPORT const rd_kafka_topic_partition_result_t **
+rd_kafka_ElectLeaders_result_partitions(
+    const rd_kafka_ElectLeaders_result_t *result,
+    size_t *cntp);
+
+/**@}*/
+
 /**
  * @name Security APIs
  * @{
@@ -8025,7 +10230,7 @@ rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk,
  *        the global rd_kafka_fatal_error() code.
  *        Fatal errors are raised by triggering the \c error_cb (see the
  *        Fatal error chapter in INTRODUCTION.md for more information), and any
- *        sub-sequent transactional API calls will return RD_KAFKA_RESP_ERR__FATAL
+ *        subsequent transactional API calls will return RD_KAFKA_RESP_ERR__FATAL
  *        or have the fatal flag set (see rd_kafka_error_is_fatal()).
  *        The originating fatal error code can be retrieved by calling
  *        rd_kafka_fatal_error().
@@ -8085,9 +10290,15 @@ rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk,
  * @param timeout_ms The maximum time to block. On timeout the operation
  *                   may continue in the background, depending on state,
  *                   and it is okay to call init_transactions() again.
+ *                   If an infinite timeout (-1) is passed, the timeout will
+ *                   be adjusted to 2 * \c transaction.timeout.ms.
  *
  * @remark This function may block up to \p timeout_ms milliseconds.
  *
+ * @remark This call is resumable when a retriable timeout error is returned.
+ *         Calling the function again will resume the operation that is
+ *         progressing in the background.
+ *
  * @returns NULL on success or an error object on failure.
  *          Check whether the returned error object permits retrying
  *          by calling rd_kafka_error_is_retriable(), or whether a fatal
@@ -8203,8 +10414,17 @@ rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
  *
  * @remark Logical and invalid offsets (such as RD_KAFKA_OFFSET_INVALID) in
  *         \p offsets will be ignored. If there are no valid offsets in
- *         \p offsets the function will return RD_KAFKA_RESP_ERR_NO_ERROR
- *         and no action will be taken.
+ *         \p offsets the function will return NULL and no action will be taken.
+ *
+ * @remark This call is retriable but not resumable, which means a new request
+ *         with a new set of provided offsets and group metadata will be
+ *         sent to the transaction coordinator if the call is retried.
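+ *
+ * Example (illustrative sketch, not part of the original header; \c consumer
+ * is assumed to be the consumer instance the offsets were read from):
+ *   rd_kafka_consumer_group_metadata_t *cgmd =
+ *       rd_kafka_consumer_group_metadata(consumer);
+ *   rd_kafka_error_t *error =
+ *       rd_kafka_send_offsets_to_transaction(rk, offsets, cgmd, -1);
+ *   rd_kafka_consumer_group_metadata_destroy(cgmd);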
+ * + * @remark It is highly recommended to retry the call (upon retriable error) + * with identical \p offsets and \p cgmetadata parameters. + * Failure to do so risks inconsistent state between what is actually + * included in the transaction and what the application thinks is + * included in the transaction. * * @returns NULL on success or an error object on failure. * Check whether the returned error object permits retrying @@ -8225,9 +10445,7 @@ rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk); * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been * configured for the producer instance, * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance, - * or if the \p consumer_group_id or \p offsets are empty, - * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous - * rd_kafka_send_offsets_to_transaction() call is still in progress. + * or if the \p consumer_group_id or \p offsets are empty. * Other error codes not listed here may be returned, depending on * broker version. * @@ -8280,6 +10498,10 @@ rd_kafka_error_t *rd_kafka_send_offsets_to_transaction( * serve the event queue in a separate thread since rd_kafka_flush() * will not serve delivery reports in this mode. * + * @remark This call is resumable when a retriable timeout error is returned. + * Calling the function again will resume the operation that is + * progressing in the background. + * * @returns NULL on success or an error object on failure. * Check whether the returned error object permits retrying * by calling rd_kafka_error_is_retriable(), or whether an abortable @@ -8339,7 +10561,10 @@ rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms); * If the application has enabled RD_KAFKA_EVENT_DR it must * serve the event queue in a separate thread since rd_kafka_flush() * will not serve delivery reports in this mode. - + * + * @remark This call is resumable when a retriable timeout error is returned. + * Calling the function again will resume the operation that is + * progressing in the background. * * @returns NULL on success or an error object on failure. 
 *          Check whether the returned error object permits retrying
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka_mock.h b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/rdkafka_mock.h
similarity index 100%
rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor/rdkafka_mock.h
rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor/rdkafka_mock.h
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/log.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/log.go
similarity index 100%
rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/log.go
rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/log.go
diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/message.go
similarity index 92%
rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/message.go
rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/message.go
index b9952bac..d473771a 100644
--- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/message.go
+++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/message.go
@@ -44,16 +44,15 @@ void setup_rkmessage (rd_kafka_message_t *rkmessage,
 import "C"
 
 // TimestampType is the Message timestamp type or source
-//
 type TimestampType int
 
 const (
 	// TimestampNotAvailable indicates no timestamp was set, or not available due to lacking broker support
-	TimestampNotAvailable = TimestampType(C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE)
+	TimestampNotAvailable TimestampType = C.RD_KAFKA_TIMESTAMP_NOT_AVAILABLE
 	// TimestampCreateTime indicates timestamp set by producer (source time)
-	TimestampCreateTime = TimestampType(C.RD_KAFKA_TIMESTAMP_CREATE_TIME)
+	TimestampCreateTime TimestampType = C.RD_KAFKA_TIMESTAMP_CREATE_TIME
	// TimestampLogAppendTime indicates timestamp set by broker (store time)
-	TimestampLogAppendTime = TimestampType(C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
+	TimestampLogAppendTime TimestampType = C.RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME
 )
 
 func (t TimestampType) String() string {
@@ -78,6 +77,7 @@ type Message struct {
 	TimestampType TimestampType
 	Opaque        interface{}
 	Headers       []Header
+	LeaderEpoch   *int32 // Deprecated: LeaderEpoch or nil if not available. Use m.TopicPartition.LeaderEpoch instead.
 }
 
 // String returns a human readable representation of a Message.
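+// Example (illustrative sketch, not part of the vendored diff): reading the
+// leader epoch of a consumed message; the non-deprecated field on
+// TopicPartition is preferred over Message.LeaderEpoch:
+//
+//	if le := msg.TopicPartition.LeaderEpoch; le != nil {
+//		fmt.Printf("leader epoch: %d\n", *le)
+//	}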
@@ -161,6 +161,12 @@ func (h *handle) setupMessageFromC(msg *Message, cmsg *C.rd_kafka_message_t) { if cmsg.err != 0 { msg.TopicPartition.Error = newError(cmsg.err) } + + leaderEpoch := int32(C.rd_kafka_message_leader_epoch(cmsg)) + if leaderEpoch >= 0 { + msg.LeaderEpoch = &leaderEpoch + msg.TopicPartition.LeaderEpoch = &leaderEpoch + } } // newMessageFromC creates a new message object from a C rd_kafka_message_t diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/metadata.go similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/metadata.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/metadata.go diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/misc.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/misc.go similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/misc.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/misc.go diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/mockcluster.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/mockcluster.go similarity index 57% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/mockcluster.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/mockcluster.go index 62ac7ff5..7b16b729 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/mockcluster.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/mockcluster.go @@ -1,5 +1,5 @@ /** - * Copyright 2022 Confluent Inc. + * Copyright 2023 Confluent Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,10 @@ package kafka -import "unsafe" +import ( + "time" + "unsafe" +) /* #include @@ -36,6 +39,8 @@ type MockCluster struct { // number of brokers that support a reasonable subset of Kafka protocol // operations, error injection, etc. // +// The broker ids will start at 1 up to and including brokerCount. +// // Mock clusters provide localhost listeners that can be used as the bootstrap // servers by multiple Kafka client instances. // @@ -77,6 +82,51 @@ func (mc *MockCluster) BootstrapServers() string { return C.GoString(C.rd_kafka_mock_cluster_bootstraps(mc.mcluster)) } +// SetRoundtripDuration sets the broker round-trip-time delay for the given broker. +// Use brokerID -1 for all brokers, or >= 0 for a specific broker. +func (mc *MockCluster) SetRoundtripDuration(brokerID int, duration time.Duration) error { + durationInMillis := C.int(duration.Milliseconds()) + cError := C.rd_kafka_mock_broker_set_rtt(mc.mcluster, C.int(brokerID), durationInMillis) + if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cError) + } + return nil +} + +// SetBrokerDown disconnects the broker and disallows any new connections. +// This does NOT trigger leader change. +// Use brokerID -1 for all brokers, or >= 0 for a specific broker. +func (mc *MockCluster) SetBrokerDown(brokerID int) error { + cError := C.rd_kafka_mock_broker_set_down(mc.mcluster, C.int(brokerID)) + if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cError) + } + return nil +} + +// SetBrokerUp makes the broker accept connections again. +// This does NOT trigger leader change. +// Use brokerID -1 for all brokers, or >= 0 for a specific broker. 
+func (mc *MockCluster) SetBrokerUp(brokerID int) error { + cError := C.rd_kafka_mock_broker_set_up(mc.mcluster, C.int(brokerID)) + if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cError) + } + return nil +} + +// CreateTopic creates a topic without having to use a producer +func (mc *MockCluster) CreateTopic(topic string, partitions, replicationFactor int) error { + topicStr := C.CString(topic) + defer C.free(unsafe.Pointer(topicStr)) + + cError := C.rd_kafka_mock_topic_create(mc.mcluster, topicStr, C.int(partitions), C.int(replicationFactor)) + if cError != C.RD_KAFKA_RESP_ERR_NO_ERROR { + return newError(cError) + } + return nil +} + // Close and destroy the MockCluster func (mc *MockCluster) Close() { C.rd_kafka_mock_cluster_destroy(mc.mcluster) diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/offset.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/offset.go similarity index 97% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/offset.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/offset.go index 4cb1819c..c0d27f95 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/offset.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/offset.go @@ -72,8 +72,8 @@ func (o *Offset) Set(offset interface{}) error { return err } -// NewOffset creates a new Offset using the provided logical string, or an -// absolute int64 offset value. +// NewOffset creates a new Offset using the provided logical string, an +// absolute int64 offset value, or a concrete Offset type. // Logical offsets: "beginning", "earliest", "end", "latest", "unset", "invalid", "stored" func NewOffset(offset interface{}) (Offset, error) { @@ -107,6 +107,8 @@ func NewOffset(offset interface{}) (Offset, error) { return Offset((int64)(v)), nil case int64: return Offset(v), nil + case Offset: + return Offset(v), nil default: return OffsetInvalid, newErrorFromString(ErrInvalidArg, fmt.Sprintf("Invalid offset type: %t", v)) diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/producer.go similarity index 83% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/producer.go index 34ce2ece..47bc2ee9 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/producer.go +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/producer.go @@ -19,7 +19,7 @@ package kafka import ( "context" "fmt" - "math" + "sync/atomic" "time" "unsafe" ) @@ -128,6 +128,15 @@ rd_kafka_resp_err_t err; */ import "C" +// minInt64 finds the minimum of two int64s. +// Required until we start using Go 1.18 with generic functions. +func minInt64(a int64, b int64) int64 { + if a > b { + return b + } + return a +} + // Producer implements a High-level Apache Kafka Producer instance type Producer struct { events chan Event @@ -136,6 +145,21 @@ type Producer struct { // Terminates the poller() goroutine pollerTermChan chan bool + + // checks if Producer has been closed or not. 
+ isClosed uint32 +} + +// IsClosed returns boolean representing if client is closed or not +func (p *Producer) IsClosed() bool { + return atomic.LoadUint32(&p.isClosed) == 1 +} + +func (p *Producer) verifyClient() error { + if p.IsClosed() { + return getOperationNotAllowedErrorForClosedClient() + } + return nil } // String returns a human readable name for a Producer instance @@ -282,6 +306,10 @@ func (p *Producer) produce(msg *Message, msgFlags int, deliveryChan chan Event) // api.version.request=true, and broker >= 0.11.0.0. // Returns an error if message could not be enqueued. func (p *Producer) Produce(msg *Message, deliveryChan chan Event) error { + err := p.verifyClient() + if err != nil { + return err + } return p.produce(msg, 0, deliveryChan) } @@ -317,34 +345,58 @@ func (p *Producer) Logs() chan LogEvent { } // ProduceChannel returns the produce *Message channel (write) +// +// Deprecated: ProduceChannel (channel based producer) is deprecated in favour +// of Produce(). +// Flush() and Len() are not guaranteed to be reliable with ProduceChannel. func (p *Producer) ProduceChannel() chan *Message { return p.produceChannel } // Len returns the number of messages and requests waiting to be transmitted to the broker // as well as delivery reports queued for the application. -// Includes messages on ProduceChannel. +// BUG: Tries to include messages on ProduceChannel, but it's not guaranteed to be reliable. func (p *Producer) Len() int { return len(p.produceChannel) + len(p.events) + int(C.rd_kafka_outq_len(p.handle.rk)) } // Flush and wait for outstanding messages and requests to complete delivery. -// Includes messages on ProduceChannel. // Runs until value reaches zero or on timeoutMs. // Returns the number of outstanding events still un-flushed. +// BUG: Tries to include messages on ProduceChannel, but it's not guaranteed to be reliable. func (p *Producer) Flush(timeoutMs int) int { termChan := make(chan bool) // unused stand-in termChan + // used to specify timeout for the underlying flush + flushIntervalChan := make(chan int64) + + // Keep calling rd_kafka_flush to ignore queue.buffering.max.ms, and + // account for any other state changes that the underlying library + // might do in case it is flushing. + go func() { + for flushInterval := range flushIntervalChan { + C.rd_kafka_flush(p.handle.rk, C.int(flushInterval)) + } + }() - d, _ := time.ParseDuration(fmt.Sprintf("%dms", timeoutMs)) - tEnd := time.Now().Add(d) + defer close(flushIntervalChan) + + timeoutDuration := time.Duration(timeoutMs) * time.Millisecond + tEnd := time.Now().Add(timeoutDuration) for p.Len() > 0 { - remain := tEnd.Sub(time.Now()).Seconds() - if remain <= 0.0 { + remain := time.Until(tEnd).Milliseconds() + if remain <= 0 { return p.Len() } + tWait := minInt64(100, remain) + + // If the previous eventPoll returned immediately, and the previous + // rd_kafka_flush did not, we'll end up in a situation where this + // channel blocks. However, this is acceptable as it may block for + // a maximum of 100ms. + flushIntervalChan <- tWait p.handle.eventPoll(p.events, - int(math.Min(100, remain*1000)), 1000, termChan) + int(tWait), 1000, termChan) } return 0 @@ -353,6 +405,10 @@ func (p *Producer) Flush(timeoutMs int) int { // Close a Producer instance. // The Producer object or its channels are no longer usable after this call. 
func (p *Producer) Close() { + if !atomic.CompareAndSwapUint32(&p.isClosed, 0, 1) { + return + } + // Wait for poller() (signaled by closing pollerTermChan) // and channel_producer() (signaled by closing ProduceChannel) close(p.pollerTermChan) @@ -405,6 +461,10 @@ const ( // // Returns nil on success, ErrInvalidArg if the purge flags are invalid or unknown. func (p *Producer) Purge(flags int) error { + err := p.verifyClient() + if err != nil { + return err + } cErr := C.rd_kafka_purge(p.handle.rk, C.int(flags)) if cErr != C.RD_KAFKA_RESP_ERR_NO_ERROR { return newError(cErr) @@ -418,19 +478,19 @@ func (p *Producer) Purge(flags int) error { // conf is a *ConfigMap with standard librdkafka configuration properties. // // Supported special configuration properties (type, default): -// go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance). -// These batches do not relate to Kafka message batches in any way. -// Note: timestamps and headers are not supported with this interface. -// go.delivery.reports (bool, true) - Forward per-message delivery reports to the -// Events() channel. -// go.delivery.report.fields (string, "key,value") - Comma separated list of fields to enable for delivery reports. -// Allowed values: all, none (or empty string), key, value, headers -// Warning: There is a performance penalty to include headers in the delivery report. -// go.events.channel.size (int, 1000000) - Events(). -// go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages) -// go.logs.channel.enable (bool, false) - Forward log to Logs() channel. -// go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true. // +// go.batch.producer (bool, false) - EXPERIMENTAL: Enable batch producer (for increased performance). +// These batches do not relate to Kafka message batches in any way. +// Note: timestamps and headers are not supported with this interface. +// go.delivery.reports (bool, true) - Forward per-message delivery reports to the +// Events() channel. +// go.delivery.report.fields (string, "key,value") - Comma separated list of fields to enable for delivery reports. +// Allowed values: all, none (or empty string), key, value, headers +// Warning: There is a performance penalty to include headers in the delivery report. +// go.events.channel.size (int, 1000000) - Events(). +// go.produce.channel.size (int, 1000000) - ProduceChannel() buffer size (in number of messages) +// go.logs.channel.enable (bool, false) - Forward log to Logs() channel. +// go.logs.channel (chan kafka.LogEvent, nil) - Forward logs to application-provided channel instead of Logs(). Requires go.logs.channel.enable=true. func NewProducer(conf *ConfigMap) (*Producer, error) { err := versionCheck() @@ -523,6 +583,7 @@ func NewProducer(conf *ConfigMap) (*Producer, error) { p.events = make(chan Event, eventsChanSize) p.produceChannel = make(chan *Message, produceChannelSize) p.pollerTermChan = make(chan bool) + p.isClosed = 0 if logsChanEnable { p.handle.setupLogQueue(logsChan, p.pollerTermChan) @@ -639,12 +700,20 @@ func poller(p *Producer, termChan chan bool) { // else information about all topics is returned. // GetMetadata is equivalent to listTopics, describeTopics and describeCluster in the Java API. 
func (p *Producer) GetMetadata(topic *string, allTopics bool, timeoutMs int) (*Metadata, error) { + err := p.verifyClient() + if err != nil { + return nil, err + } return getMetadata(p, topic, allTopics, timeoutMs) } // QueryWatermarkOffsets returns the broker's low and high offsets for the given topic // and partition. func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error) { + err = p.verifyClient() + if err != nil { + return -1, -1, err + } return queryWatermarkOffsets(p, topic, partition, timeoutMs) } @@ -664,11 +733,19 @@ func (p *Producer) QueryWatermarkOffsets(topic string, partition int32, timeoutM // Duplicate Topic+Partitions are not supported. // Per-partition errors may be returned in the `.Error` field. func (p *Producer) OffsetsForTimes(times []TopicPartition, timeoutMs int) (offsets []TopicPartition, err error) { + err = p.verifyClient() + if err != nil { + return nil, err + } return offsetsForTimes(p, times, timeoutMs) } // GetFatalError returns an Error object if the client instance has raised a fatal error, else nil. func (p *Producer) GetFatalError() error { + err := p.verifyClient() + if err != nil { + return err + } return getFatalError(p) } @@ -689,6 +766,10 @@ func (p *Producer) TestFatalError(code ErrorCode, str string) ErrorCode { // 3) SASL/OAUTHBEARER is supported but is not configured as the client's // authentication mechanism. func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error { + err := p.verifyClient() + if err != nil { + return err + } return p.handle.setOAuthBearerToken(oauthBearerToken) } @@ -700,6 +781,10 @@ func (p *Producer) SetOAuthBearerToken(oauthBearerToken OAuthBearerToken) error // 2) SASL/OAUTHBEARER is supported but is not configured as the client's // authentication mechanism. func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error { + err := p.verifyClient() + if err != nil { + return err + } return p.handle.setOAuthBearerTokenFailure(errstr) } @@ -722,25 +807,23 @@ func (p *Producer) SetOAuthBearerTokenFailure(errstr string) error { // will acquire the internal producer id and epoch, used in all future // transactional messages issued by this producer instance. // -// Upon successful return from this function the application has to perform at -// least one of the following operations within `transaction.timeout.ms` to -// avoid timing out the transaction on the broker: -// * `Produce()` (et.al) -// * `SendOffsetsToTransaction()` -// * `CommitTransaction()` -// * `AbortTransaction()` -// // Parameters: -// * `ctx` - The maximum time to block, or nil for indefinite. -// On timeout the operation may continue in the background, -// depending on state, and it is okay to call `InitTransactions()` -// again. +// - `ctx` - The maximum time to block, or nil for indefinite. +// On timeout the operation may continue in the background, +// depending on state, and it is okay to call `InitTransactions()` +// again. +// Providing a nil context or a context without a deadline uses +// the timeout 2*transaction.timeout.ms. // // Returns nil on success or an error on failure. // Check whether the returned error object permits retrying // by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal // error has been raised by calling `err.(kafka.Error).IsFatal()`. 
func (p *Producer) InitTransactions(ctx context.Context) error { + err := p.verifyClient() + if err != nil { + return err + } cError := C.rd_kafka_init_transactions(p.handle.rk, cTimeoutFromContext(ctx)) if cError != nil { @@ -755,6 +838,14 @@ func (p *Producer) InitTransactions(ctx context.Context) error { // `InitTransactions()` must have been called successfully (once) // before this function is called. // +// Upon successful return from this function the application has to perform at +// least one of the following operations within `transaction.timeout.ms` to +// avoid timing out the transaction on the broker: +// - `Produce()` (et.al) +// - `SendOffsetsToTransaction()` +// - `CommitTransaction()` +// - `AbortTransaction()` +// // Any messages produced, offsets sent (`SendOffsetsToTransaction()`), // etc, after the successful return of this function will be part of // the transaction and committed or aborted atomatically. @@ -771,6 +862,10 @@ func (p *Producer) InitTransactions(ctx context.Context) error { // Any produce call outside an on-going transaction, or for a failed // transaction, will fail. func (p *Producer) BeginTransaction() error { + err := p.verifyClient() + if err != nil { + return err + } cError := C.rd_kafka_begin_transaction(p.handle.rk) if cError != nil { return newErrorFromCErrorDestroy(cError) @@ -795,13 +890,13 @@ func (p *Producer) BeginTransaction() error { // to committing the transaction with `CommitTransaction()`. // // Parameters: -// * `ctx` - The maximum amount of time to block, or nil for indefinite. -// * `offsets` - List of offsets to commit to the consumer group upon -// successful commit of the transaction. Offsets should be -// the next message to consume, e.g., last processed message + 1. -// * `consumerMetadata` - The current consumer group metadata as returned by -// `consumer.GetConsumerGroupMetadata()` on the consumer -// instance the provided offsets were consumed from. +// - `ctx` - The maximum amount of time to block, or nil for indefinite. +// - `offsets` - List of offsets to commit to the consumer group upon +// successful commit of the transaction. Offsets should be +// the next message to consume, e.g., last processed message + 1. +// - `consumerMetadata` - The current consumer group metadata as returned by +// `consumer.GetConsumerGroupMetadata()` on the consumer +// instance the provided offsets were consumed from. // // Note: The consumer must disable auto commits (set `enable.auto.commit` to false on the consumer). // @@ -816,6 +911,10 @@ func (p *Producer) BeginTransaction() error { // `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()` // respectively. func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []TopicPartition, consumerMetadata *ConsumerGroupMetadata) error { + err := p.verifyClient() + if err != nil { + return err + } var cOffsets *C.rd_kafka_topic_partition_list_t if offsets != nil { cOffsets = newCPartsFromTopicPartitions(offsets) @@ -852,7 +951,7 @@ func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []Topic // transaction with `BeginTransaction()`. // // Parameters: -// * `ctx` - The maximum amount of time to block, or nil for indefinite. +// - `ctx` - The maximum amount of time to block, or nil for indefinite. 
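+//
+// Example (illustrative sketch, not from the vendored source): committing and
+// classifying the returned error:
+//
+//	if err := producer.CommitTransaction(ctx); err != nil {
+//		if kErr, ok := err.(kafka.Error); ok && kErr.TxnRequiresAbort() {
+//			_ = producer.AbortTransaction(ctx)
+//		}
+//	}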
// // Note: This function will block until all outstanding messages are // delivered and the transaction commit request has been successfully @@ -872,6 +971,10 @@ func (p *Producer) SendOffsetsToTransaction(ctx context.Context, offsets []Topic // `err.(kafka.Error).TxnRequiresAbort()` or `err.(kafka.Error).IsFatal()` // respectively. func (p *Producer) CommitTransaction(ctx context.Context) error { + err := p.verifyClient() + if err != nil { + return err + } cError := C.rd_kafka_commit_transaction(p.handle.rk, cTimeoutFromContext(ctx)) if cError != nil { @@ -890,7 +993,7 @@ func (p *Producer) CommitTransaction(ctx context.Context) error { // `ErrPurgeInflight` or `ErrPurgeQueue`. // // Parameters: -// * `ctx` - The maximum amount of time to block, or nil for indefinite. +// - `ctx` - The maximum amount of time to block, or nil for indefinite. // // Note: This function will block until all outstanding messages are purged // and the transaction abort request has been successfully @@ -908,6 +1011,10 @@ func (p *Producer) CommitTransaction(ctx context.Context) error { // by calling `err.(kafka.Error).IsRetriable()`, or whether a fatal error // has been raised by calling `err.(kafka.Error).IsFatal()`. func (p *Producer) AbortTransaction(ctx context.Context) error { + err := p.verifyClient() + if err != nil { + return err + } cError := C.rd_kafka_abort_transaction(p.handle.rk, cTimeoutFromContext(ctx)) if cError != nil { @@ -916,3 +1023,17 @@ func (p *Producer) AbortTransaction(ctx context.Context) error { return nil } + +// SetSaslCredentials sets the SASL credentials used for this producer. The new credentials +// will overwrite the old ones (which were set when creating the producer or by a previous +// call to SetSaslCredentials). The new credentials will be used the next time this +// producer needs to authenticate to a broker. This method will not disconnect +// existing broker connections that were established with the old credentials. +// This method applies only to the SASL PLAIN and SCRAM mechanisms. 
+func (p *Producer) SetSaslCredentials(username, password string) error { + err := p.verifyClient() + if err != nil { + return err + } + return setSaslCredentials(p.handle.rk, username, password) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/select_rdkafka.h b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/select_rdkafka.h similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/select_rdkafka.h rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/select_rdkafka.h diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testconf-example.json b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/testconf-example.json similarity index 50% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/testconf-example.json rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/testconf-example.json index 7024a9c0..349cdd37 100644 --- a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/testconf-example.json +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/testconf-example.json @@ -1,6 +1,10 @@ { "Brokers": "mybroker or $BROKERS env", - "Topic": "test", + "BrokersSasl": "mybroker or $BROKERSSASL env", + "SaslUsername": "testuser", + "SaslPassword": "testpass", + "SaslMechanism": "PLAIN", + "TopicName": "test", "GroupID": "testgroup", "PerfMsgCount": 1000000, "PerfMsgSize": 100, diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/kafka/time.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/time.go similarity index 100% rename from vendor/github.com/confluentinc/confluent-kafka-go/kafka/time.go rename to vendor/github.com/confluentinc/confluent-kafka-go/v2/kafka/time.go diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/api.html b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/api.html new file mode 100644 index 00000000..15bf1156 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/api.html @@ -0,0 +1,788 @@ + + + + + + + + schemaregistry - Go Documentation Server + + + + + + + + +
+Package schemaregistry
+
+    import "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry"
+
+Constants
+
+const (
+    // Upgrade denotes upgrade mode
+    Upgrade = 1
+    // Downgrade denotes downgrade mode
+    Downgrade = 2
+    // UpDown denotes upgrade/downgrade mode
+    UpDown = 3
+    // Write denotes write mode
+    Write = 4
+    // Read denotes read mode
+    Read = 5
+    // WriteRead denotes write/read mode
+    WriteRead = 6
+)
+
+const (
+    // None is no compatibility
+    None
+    // Backward compatibility
+    Backward
+    // Forward compatibility
+    Forward
+    // Full compatibility
+    Full
+    // BackwardTransitive compatibility
+    BackwardTransitive
+    // ForwardTransitive compatibility
+    ForwardTransitive
+    // FullTransitive compatibility
+    FullTransitive
+)
+
+type Client
+
+    Client is an interface for clients interacting with the Confluent Schema
+    Registry. The Schema Registry's REST interface is further explained in
+    Confluent's Schema Registry API documentation:
+    https://github.com/confluentinc/schema-registry/blob/master/client/src/main/java/io/confluent/kafka/schemaregistry/client/SchemaRegistryClient.java
+
+type Client interface {
+    Config() *Config
+    GetAllContexts() ([]string, error)
+    Register(subject string, schema SchemaInfo, normalize bool) (id int, err error)
+    RegisterFullResponse(subject string, schema SchemaInfo, normalize bool) (result SchemaMetadata, err error)
+    GetBySubjectAndID(subject string, id int) (schema SchemaInfo, err error)
+    GetSubjectsAndVersionsByID(id int) (subjectAndVersion []SubjectAndVersion, err error)
+    GetID(subject string, schema SchemaInfo, normalize bool) (id int, err error)
+    GetLatestSchemaMetadata(subject string) (SchemaMetadata, error)
+    GetSchemaMetadata(subject string, version int) (SchemaMetadata, error)
+    GetSchemaMetadataIncludeDeleted(subject string, version int, deleted bool) (SchemaMetadata, error)
+    GetLatestWithMetadata(subject string, metadata map[string]string, deleted bool) (SchemaMetadata, error)
+    GetAllVersions(subject string) ([]int, error)
+    GetVersion(subject string, schema SchemaInfo, normalize bool) (version int, err error)
+    GetAllSubjects() ([]string, error)
+    DeleteSubject(subject string, permanent bool) ([]int, error)
+    DeleteSubjectVersion(subject string, version int, permanent bool) (deletes int, err error)
+    TestSubjectCompatibility(subject string, schema SchemaInfo) (compatible bool, err error)
+    TestCompatibility(subject string, version int, schema SchemaInfo) (compatible bool, err error)
+    GetCompatibility(subject string) (compatibility Compatibility, err error)
+    UpdateCompatibility(subject string, update Compatibility) (compatibility Compatibility, err error)
+    GetDefaultCompatibility() (compatibility Compatibility, err error)
+    UpdateDefaultCompatibility(update Compatibility) (compatibility Compatibility, err error)
+    GetConfig(subject string, defaultToGlobal bool) (result ServerConfig, err error)
+    UpdateConfig(subject string, update ServerConfig) (result ServerConfig, err error)
+    GetDefaultConfig() (result ServerConfig, err error)
+    UpdateDefaultConfig(update ServerConfig) (result ServerConfig, err error)
+    ClearLatestCaches() error
+    ClearCaches() error
+    Close() error
+}
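To make the interface above concrete, a minimal sketch of the compatibility workflow it exposes, built with `NewConfig` and `NewClient` (both documented just below). The registry URL and the subject name `metrics-value` are illustrative assumptions, not values taken from this change, and `Backward` is assumed to be one of the `Compatibility` constants listed above, as its grouping suggests:

```go
package main

import (
	"fmt"
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/schemaregistry"
)

func main() {
	// Placeholder URL; point at a reachable Schema Registry instance.
	client, err := schemaregistry.NewClient(schemaregistry.NewConfig("http://localhost:8081"))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer client.Close()

	// Pin the subject to backward compatibility: a new schema version must
	// remain readable by consumers of the previous version.
	if _, err := client.UpdateCompatibility("metrics-value", schemaregistry.Backward); err != nil {
		log.Fatalf("update compatibility: %v", err)
	}

	compat, err := client.GetCompatibility("metrics-value")
	if err != nil {
		log.Fatalf("get compatibility: %v", err)
	}
	fmt.Println("compatibility:", compat.String())
}
```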
+
+func NewClient(conf *Config) (Client, error)
+    NewClient returns a Client implementation.
+
+type Compatibility int
+    Compatibility options.
+
+func (c *Compatibility) MarshalJSON() ([]byte, error)
+    MarshalJSON implements json.Marshaler.
+
+func (c *Compatibility) ParseString(val string) error
+    ParseString returns a Compatibility for the given string.
+
+func (c *Compatibility) String() string
+
+func (c *Compatibility) UnmarshalJSON(b []byte) error
+    UnmarshalJSON implements json.Unmarshaler.
+
+type Config struct {
+    internal.ClientConfig
+}
+    Config is used to pass multiple configuration options to the Schema
+    Registry client.
+
+func NewConfig(url string) *Config
+    NewConfig returns a new configuration instance with sane defaults.
+
+func NewConfigWithAuthentication(url string, username string, password string) *Config
+    NewConfigWithAuthentication returns a new configuration instance using
+    basic authentication. For Confluent Cloud, use the API key for the
+    username and the API secret for the password. This method is deprecated.
+
+func NewConfigWithBasicAuthentication(url string, username string, password string) *Config
+    NewConfigWithBasicAuthentication returns a new configuration instance
+    using basic authentication. For Confluent Cloud, use the API key for the
+    username and the API secret for the password.
+
+func NewConfigWithBearerAuthentication(url, token, targetSr, identityPoolID string) *Config
+    NewConfigWithBearerAuthentication returns a new configuration instance
+    using bearer authentication. For Confluent Cloud, targetSr
+    (`bearer.auth.logical.cluster`) and identityPoolID
+    (`bearer.auth.identity.pool.id`) are required.
+
+type Metadata struct {
+    Tags       map[string][]string `json:"tags,omitempty"`
+    Properties map[string]string   `json:"properties,omitempty"`
+    Sensitive  []string            `json:"sensitive,omitempty"`
+}
+    Metadata represents user-defined metadata.
+
+type Reference struct {
+    Name    string `json:"name"`
+    Subject string `json:"subject"`
+    Version int    `json:"version"`
+}
+    Reference represents a schema reference.
+
+type Rule struct {
+    // Rule name
+    Name string `json:"name,omitempty"`
+    // Rule doc
+    Doc string `json:"doc,omitempty"`
+    // Rule kind
+    Kind string `json:"kind,omitempty"`
+    // Rule mode
+    Mode string `json:"mode,omitempty"`
+    // Rule type
+    Type string `json:"type,omitempty"`
+    // The tags to which this rule applies
+    Tags []string `json:"tags,omitempty"`
+    // Optional params for the rule
+    Params map[string]string `json:"params,omitempty"`
+    // Rule expression
+    Expr string `json:"expr,omitempty"`
+    // Rule action on success
+    OnSuccess string `json:"onSuccess,omitempty"`
+    // Rule action on failure
+    OnFailure string `json:"onFailure,omitempty"`
+    // Whether the rule is disabled
+    Disabled bool `json:"disabled,omitempty"`
+}
+    Rule represents a data contract rule.
+
+type RuleMode = int
+    RuleMode represents the rule mode.
+
+func ParseMode(mode string) (RuleMode, bool)
+    ParseMode parses the given rule mode.
+
+type RuleSet struct {
+    MigrationRules []Rule `json:"migrationRules,omitempty"`
+    DomainRules    []Rule `json:"domainRules,omitempty"`
+}
+    RuleSet represents a data contract rule set.
+
+func (r *RuleSet) HasRules(mode RuleMode) bool
+    HasRules checks if the ruleset has rules for the given mode.
+
+type SchemaInfo struct {
+    Schema     string      `json:"schema,omitempty"`
+    SchemaType string      `json:"schemaType,omitempty"`
+    References []Reference `json:"references,omitempty"`
+    Metadata   *Metadata   `json:"metadata,omitempty"`
+    RuleSet    *RuleSet    `json:"ruleSet,omitempty"`
+}
+    SchemaInfo represents basic schema information.
+
+func (sd *SchemaInfo) MarshalJSON() ([]byte, error)
+    MarshalJSON implements the json.Marshaler interface.
+
+func (sd *SchemaInfo) UnmarshalJSON(b []byte) error
+    UnmarshalJSON implements the json.Unmarshaller interface.
+
+type SchemaMetadata struct {
+    SchemaInfo
+    ID      int    `json:"id,omitempty"`
+    Subject string `json:"subject,omitempty"`
+    Version int    `json:"version,omitempty"`
+}
+    SchemaMetadata represents schema metadata.
+
+func (sd *SchemaMetadata) MarshalJSON() ([]byte, error)
+    MarshalJSON implements the json.Marshaler interface.
+
+func (sd *SchemaMetadata) UnmarshalJSON(b []byte) error
+    UnmarshalJSON implements the json.Unmarshaller interface.
+
+type ServerConfig struct {
+    Alias               string        `json:"alias,omitempty"`
+    Normalize           bool          `json:"normalize,omitempty"`
+    CompatibilityUpdate Compatibility `json:"compatibility,omitempty"`
+    CompatibilityLevel  Compatibility `json:"compatibilityLevel,omitempty"`
+    CompatibilityGroup  string        `json:"compatibilityGroup,omitempty"`
+    DefaultMetadata     *Metadata     `json:"defaultMetadata,omitempty"`
+    OverrideMetadata    *Metadata     `json:"overrideMetadata,omitempty"`
+    DefaultRuleSet      *RuleSet      `json:"defaultRuleSet,omitempty"`
+    OverrideRuleSet     *RuleSet      `json:"overrideRuleSet,omitempty"`
+}
+    ServerConfig represents config params for Schema Registry.
+    NOTE: GET uses compatibilityLevel, POST uses compatibility.
+
+type SubjectAndVersion struct {
+    Subject string `json:"subject,omitempty"`
+    Version int    `json:"version,omitempty"`
+}
+    SubjectAndVersion represents a pair of subject and version.
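Putting the pieces above together, a short end-to-end sketch of registering a schema and reading it back, using only the signatures shown in this page. The URL, credentials, subject, and Avro record literal are illustrative assumptions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/confluentinc/confluent-kafka-go/v2/schemaregistry"
)

func main() {
	// Basic-auth construction; all three arguments here are placeholders.
	client, err := schemaregistry.NewClient(schemaregistry.NewConfigWithBasicAuthentication(
		"http://localhost:8081", "user", "pass"))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer client.Close()

	// Register an Avro schema under a subject; Register returns the schema ID.
	id, err := client.Register("metrics-value", schemaregistry.SchemaInfo{
		Schema:     `{"type":"record","name":"Metric","fields":[{"name":"name","type":"string"}]}`,
		SchemaType: "AVRO",
	}, true)
	if err != nil {
		log.Fatalf("register schema: %v", err)
	}
	fmt.Println("registered schema id:", id)

	// Read the latest version back to confirm what the registry stored.
	md, err := client.GetLatestSchemaMetadata("metrics-value")
	if err != nil {
		log.Fatalf("fetch latest: %v", err)
	}
	fmt.Printf("subject=%s version=%d id=%d\n", md.Subject, md.Version, md.ID)
}
```

Registering an identical schema under the same subject returns the existing ID rather than creating a new version, which is what makes the client's caching of the (subject, schema) to ID mapping safe.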

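The `cache` package added below provides the map- and LRU-backed stores behind the client's internal schema caches (sized by `CacheCapacity` in `ClientConfig`). A standalone sketch of the eviction behavior that `lrucache.go` implements, with illustrative keys and values:

```go
package main

import (
	"fmt"

	"github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache"
)

func main() {
	// A two-entry LRU: inserting a third key evicts the least recently used.
	c, err := cache.NewLRUCache(2)
	if err != nil {
		panic(err) // capacity must be a positive integer
	}

	c.Put("a", 1)
	c.Put("b", 2)
	c.Get("a")    // touching "a" makes "b" the least recently used entry
	c.Put("c", 3) // at capacity, so "b" is evicted

	if _, ok := c.Get("b"); !ok {
		fmt.Println(`"b" was evicted`)
	}
	fmt.Println(c.ToMap()) // map[a:1 c:3]
}
```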
+ + + + diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache/cache.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache/cache.go new file mode 100644 index 00000000..5f63c0b6 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache/cache.go @@ -0,0 +1,44 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cache + +// Cache represents a key-value storage where to put cached data +type Cache interface { + // Get returns the cache value associated with key + // + // Parameters: + // * `key` - the key to retrieve + // + // Returns the value associated with key and a bool that is `false` + // if the key was not found + Get(key interface{}) (interface{}, bool) + // Put puts a value in cache associated with key + // + // Parameters: + // * `key` - the key to put + // * `value` - the value to put + Put(key interface{}, value interface{}) + // Delete deletes the cache entry associated with key + // + // Parameters: + // * `key` - the key to delete + Delete(key interface{}) + // Clear clears the cache + Clear() + // ToMap returns the current cache entries copied into a map + ToMap() map[interface{}]interface{} +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache/lrucache.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache/lrucache.go new file mode 100644 index 00000000..d52ca987 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache/lrucache.go @@ -0,0 +1,156 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cache + +import ( + "container/list" + "fmt" + "sync" +) + +const maxPreallocateCapacity = 10000 + +// LRUCache is a Least Recently Used (LRU) Cache with given capacity +type LRUCache struct { + cacheLock sync.RWMutex + capacity int + entries map[interface{}]interface{} + lruElements map[interface{}]*list.Element + lruKeys *list.List +} + +// NewLRUCache creates a new Least Recently Used (LRU) Cache +// +// Parameters: +// - `capacity` - a positive integer indicating the max capacity of this cache +// +// Returns the new allocated LRU Cache and an error +func NewLRUCache(capacity int) (c *LRUCache, err error) { + if capacity <= 0 { + return nil, fmt.Errorf("capacity must be a positive integer") + } + c = new(LRUCache) + c.capacity = capacity + if capacity <= maxPreallocateCapacity { + c.entries = make(map[interface{}]interface{}, capacity) + c.lruElements = make(map[interface{}]*list.Element, capacity) + } else { + c.entries = make(map[interface{}]interface{}) + c.lruElements = make(map[interface{}]*list.Element) + } + c.lruKeys = list.New() + return +} + +// Get returns the cache value associated with key +// +// Parameters: +// - `key` - the key to retrieve +// +// Returns the value associated with key and a bool that is `false` +// if the key was not found +func (c *LRUCache) Get(key interface{}) (value interface{}, ok bool) { + var element *list.Element + c.cacheLock.RLock() + value, ok = c.entries[key] + if ok { + element, ok = c.lruElements[key] + } + c.cacheLock.RUnlock() + if ok { + c.cacheLock.Lock() + c.lruKeys.MoveToFront(element) + c.cacheLock.Unlock() + } else { + value = nil + } + return value, ok +} + +// Put puts a value in cache associated with key +// +// Parameters: +// - `key` - the key to put +// - `value` - the value to put +func (c *LRUCache) Put(key interface{}, value interface{}) { + c.cacheLock.Lock() + _, ok := c.entries[key] + if !ok { + // delete in advance to avoid increasing map capacity + if c.lruKeys.Len() == c.capacity { + back := c.lruKeys.Back() + if back != nil { + value := c.lruKeys.Remove(back) + delete(c.lruElements, value) + delete(c.entries, value) + } + } + element := c.lruKeys.PushFront(key) + c.lruElements[key] = element + } else { + existingElement, okElement := c.lruElements[key] + if okElement { + c.lruKeys.MoveToFront(existingElement) + } + } + c.entries[key] = value + c.cacheLock.Unlock() +} + +// Delete deletes the cache entry associated with key +// +// Parameters: +// - `key` - the key to delete +func (c *LRUCache) Delete(key interface{}) { + c.cacheLock.RLock() + _, ok := c.entries[key] + c.cacheLock.RUnlock() + if ok { + c.cacheLock.Lock() + element, okElement := c.lruElements[key] + if okElement { + delete(c.lruElements, key) + c.lruKeys.Remove(element) + } + delete(c.entries, key) + c.cacheLock.Unlock() + } +} + +// Clear clears the cache +func (c *LRUCache) Clear() { + c.cacheLock.Lock() + for key, value := range c.lruElements { + delete(c.lruElements, key) + c.lruKeys.Remove(value) + } + for key := range c.entries { + delete(c.entries, key) + } + c.cacheLock.Unlock() +} + +// ToMap returns the current cache entries copied into a map +func (c *LRUCache) ToMap() map[interface{}]interface{} { + ret := make(map[interface{}]interface{}) + c.cacheLock.RLock() + for k, v := range c.entries { + ret[k] = v + } + c.cacheLock.RUnlock() + return ret +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache/mapcache.go 
b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache/mapcache.go new file mode 100644 index 00000000..a098f3cb --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache/mapcache.go @@ -0,0 +1,74 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cache + +// MapCache is a cache backed by a map +type MapCache struct { + entries map[interface{}]interface{} +} + +// NewMapCache creates a new cache backed by a map +func NewMapCache() *MapCache { + c := new(MapCache) + c.entries = make(map[interface{}]interface{}) + return c +} + +// Get returns the cache value associated with key +// +// Parameters: +// - `key` - the key to retrieve +// +// Returns the value associated with key and a bool that is `false` +// if the key was not found +func (c *MapCache) Get(key interface{}) (value interface{}, ok bool) { + value, ok = c.entries[key] + return +} + +// Put puts a value in cache associated with key +// +// Parameters: +// - `key` - the key to put +// - `value` - the value to put +func (c *MapCache) Put(key interface{}, value interface{}) { + c.entries[key] = value +} + +// Delete deletes the cache entry associated with key +// +// Parameters: +// - `key` - the key to delete +func (c *MapCache) Delete(key interface{}) { + delete(c.entries, key) +} + +// Clear clears the cache +func (c *MapCache) Clear() { + for key := range c.entries { + delete(c.entries, key) + } +} + +// ToMap returns the current cache entries copied into a map +func (c *MapCache) ToMap() map[interface{}]interface{} { + ret := make(map[interface{}]interface{}) + for k, v := range c.entries { + ret[k] = v + } + return ret +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/config.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/config.go new file mode 100644 index 00000000..7e733e15 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/config.go @@ -0,0 +1,86 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package schemaregistry + +import ( + "fmt" + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/internal" +) + +// Config is used to pass multiple configuration options to the Schema Registry client. +type Config struct { + internal.ClientConfig +} + +// NewConfig returns a new configuration instance with sane defaults. 
+func NewConfig(url string) *Config { + c := &Config{} + + c.SchemaRegistryURL = url + + c.SaslMechanism = "GSSAPI" + c.SaslUsername = "" + c.SaslPassword = "" + + c.SslCertificateLocation = "" + c.SslKeyLocation = "" + c.SslCaLocation = "" + c.SslDisableEndpointVerification = false + + c.ConnectionTimeoutMs = 10000 + c.RequestTimeoutMs = 10000 + + return c +} + +// NewConfigWithAuthentication returns a new configuration instance using basic authentication. +// For Confluent Cloud, use the API key for the username and the API secret for the password. +// This method is deprecated. +func NewConfigWithAuthentication(url string, username string, password string) *Config { + c := NewConfig(url) + + c.BasicAuthUserInfo = fmt.Sprintf("%s:%s", username, password) + c.BasicAuthCredentialsSource = "USER_INFO" + + return c +} + +// NewConfigWithBasicAuthentication returns a new configuration instance using basic authentication. +// For Confluent Cloud, use the API key for the username and the API secret for the password. +func NewConfigWithBasicAuthentication(url string, username string, password string) *Config { + c := NewConfig(url) + + c.BasicAuthUserInfo = fmt.Sprintf("%s:%s", username, password) + c.BasicAuthCredentialsSource = "USER_INFO" + + return c +} + +// NewConfigWithBearerAuthentication returns a new configuration instance using bearer authentication. +// For Confluent Cloud, targetSr(`bearer.auth.logical.cluster` and +// identityPoolID(`bearer.auth.identity.pool.id`) is required +func NewConfigWithBearerAuthentication(url, token, targetSr, identityPoolID string) *Config { + + c := NewConfig(url) + + c.BearerAuthToken = token + c.BearerAuthCredentialsSource = "STATIC_TOKEN" + c.BearerAuthLogicalCluster = targetSr + c.BearerAuthIdentityPoolID = identityPoolID + + return c +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/internal/client_config.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/internal/client_config.go new file mode 100644 index 00000000..b16527ad --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/internal/client_config.go @@ -0,0 +1,69 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "net/http" +) + +// ClientConfig is used to pass multiple configuration options to the Schema Registry client. +type ClientConfig struct { + // SchemaRegistryURL determines the URL of Schema Registry. + SchemaRegistryURL string + + // BasicAuthUserInfo specifies the user info in the form of {username}:{password}. + BasicAuthUserInfo string + // BasicAuthCredentialsSource specifies how to determine the credentials, one of URL, USER_INFO, and SASL_INHERIT. + BasicAuthCredentialsSource string + + // SaslMechanism specifies the SASL mechanism used for client connections, which defaults to GSSAPI. + SaslMechanism string + // SaslUsername specifies the username for SASL. 
+ SaslUsername string + // SaslUsername specifies the password for SASL. + SaslPassword string + + // BearerAuthToken specifies the token for authentication. + BearerAuthToken string + // BearerAuthCredentialsSource specifies how to determine the credentials. + BearerAuthCredentialsSource string + // BearerAuthLogicalCluster specifies the target SR logical cluster id. It is required for Confluent Cloud Schema Registry + BearerAuthLogicalCluster string + // BearerAuthIdentityPoolID specifies the identity pool ID. It is required for Confluent Cloud Schema Registry + BearerAuthIdentityPoolID string + + // SslCertificateLocation specifies the location of SSL certificates. + SslCertificateLocation string + // SslKeyLocation specifies the location of SSL keys. + SslKeyLocation string + // SslCaLocation specifies the location of SSL certificate authorities. + SslCaLocation string + // SslDisableEndpointVerification determines whether to disable endpoint verification. + SslDisableEndpointVerification bool + + // ConnectionTimeoutMs determines the connection timeout in milliseconds. + ConnectionTimeoutMs int + // RequestTimeoutMs determines the request timeout in milliseconds. + RequestTimeoutMs int + // CacheCapacity positive integer or zero for unbounded capacity + CacheCapacity int + // CacheLatestTTLSecs ttl in secs for caching the latest schema + CacheLatestTTLSecs int + + // HTTP client + HTTPClient *http.Client +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/internal/rest_service.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/internal/rest_service.go new file mode 100644 index 00000000..b0a2d1fa --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/internal/rest_service.go @@ -0,0 +1,373 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "path" + "strings" + "time" + + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/rest" +) + +// Relative Confluent Schema Registry REST API endpoints as described in the Confluent documentation +// https://docs.confluent.io/current/schema-registry/docs/api.html +const ( + Base = ".." 
+ Schemas = "/schemas/ids/%d" + Contexts = "/contexts" + SchemasBySubject = "/schemas/ids/%d?subject=%s" + SubjectsAndVersionsByID = "/schemas/ids/%d/versions" + Subject = "/subjects" + Subjects = Subject + "/%s" + SubjectsNormalize = Subject + "/%s?normalize=%t" + SubjectsDelete = Subjects + "?permanent=%t" + LatestWithMetadata = Subjects + "/metadata?deleted=%t%s" + Version = Subjects + "/versions" + VersionNormalize = Subjects + "/versions?normalize=%t" + Versions = Version + "/%v" + VersionsIncludeDeleted = Versions + "?deleted=%t" + VersionsDelete = Versions + "?permanent=%t" + SubjectCompatibility = "/compatibility" + Version + Compatibility = "/compatibility" + Versions + Config = "/config" + SubjectConfig = Config + "/%s" + SubjectConfigDefault = SubjectConfig + "?defaultToGlobal=%t" + Mode = "/mode" + SubjectMode = Mode + "/%s" + + Keks = "/dek-registry/v1/keks" + KekByName = Keks + "/%s?deleted=%t" + Deks = Keks + "/%s/deks" + DeksBySubject = Deks + "/%s?deleted=%t" + DeksByVersion = Deks + "/%s/versions/%v?deleted=%t" + + TargetSRClusterKey = "Target-Sr-Cluster" + TargetIdentityPoolIDKey = "Confluent-Identity-Pool-Id" +) + +// API represents a REST API request +type API struct { + method string + endpoint string + arguments []interface{} + body interface{} +} + +// NewRequest returns new Confluent Schema Registry API request */ +func NewRequest(method string, endpoint string, body interface{}, arguments ...interface{}) *API { + return &API{ + method: method, + endpoint: endpoint, + arguments: arguments, + body: body, + } +} + +/* +* HTTP error codes/ SR int:error_code: +* 402: Invalid {resource} +* 404: {resource} not found +* - 40401 - Subject not found +* - 40402 - SchemaMetadata not found +* - 40403 - Schema not found +* 422: Invalid {resource} +* - 42201 - Invalid Schema +* - 42202 - Invalid SchemaMetadata +* 500: Internal Server Error (something broke between SR and Kafka) +* - 50001 - Error in backend(kafka) +* - 50002 - Operation timed out +* - 50003 - Error forwarding request to SR leader + */ + +// RestService represents a REST client +type RestService struct { + url *url.URL + headers http.Header + *http.Client +} + +// NewRestService returns a new REST client for the Confluent Schema Registry +func NewRestService(conf *ClientConfig) (*RestService, error) { + urlConf := conf.SchemaRegistryURL + u, err := url.Parse(urlConf) + + if err != nil { + return nil, err + } + + headers, err := NewAuthHeader(u, conf) + if err != nil { + return nil, err + } + + headers.Add("Content-Type", "application/vnd.schemaregistry.v1+json") + if err != nil { + return nil, err + } + + if conf.HTTPClient == nil { + transport, err := configureTransport(conf) + if err != nil { + return nil, err + } + + timeout := conf.RequestTimeoutMs + + conf.HTTPClient = &http.Client{ + Transport: transport, + Timeout: time.Duration(timeout) * time.Millisecond, + } + } + + return &RestService{ + url: u, + headers: headers, + Client: conf.HTTPClient, + }, nil +} + +// encodeBasicAuth adds a basic http authentication header to the provided header +func encodeBasicAuth(userinfo string) string { + return base64.StdEncoding.EncodeToString([]byte(userinfo)) +} + +// ConfigureTLS populates tlsConf +func ConfigureTLS(conf *ClientConfig, tlsConf *tls.Config) error { + certFile := conf.SslCertificateLocation + keyFile := conf.SslKeyLocation + caFile := conf.SslCaLocation + unsafe := conf.SslDisableEndpointVerification + + var err error + if certFile != "" { + if keyFile == "" { + return errors.New( + "SslKeyLocation 
needs to be provided if using SslCertificateLocation") + } + var cert tls.Certificate + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + tlsConf.Certificates = []tls.Certificate{cert} + } + + if caFile != "" { + if unsafe { + log.Println("WARN: endpoint verification is currently disabled. " + + "This feature should be configured for development purposes only") + } + var caCert []byte + caCert, err := ioutil.ReadFile(caFile) + if err != nil { + return err + } + + tlsConf.RootCAs = x509.NewCertPool() + if !tlsConf.RootCAs.AppendCertsFromPEM(caCert) { + return fmt.Errorf("could not parse certificate from %s", caFile) + } + } + + tlsConf.BuildNameToCertificate() + + return err +} + +// configureTransport returns a new Transport for use by the Confluent Schema Registry REST client +func configureTransport(conf *ClientConfig) (*http.Transport, error) { + + // Exposed for testing purposes only. In production properly formed certificates should be used + // https://tools.ietf.org/html/rfc2818#section-3 + tlsConfig := &tls.Config{} + if err := ConfigureTLS(conf, tlsConfig); err != nil { + return nil, err + } + + timeout := conf.ConnectionTimeoutMs + + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: time.Duration(timeout) * time.Millisecond, + }).Dial, + TLSClientConfig: tlsConfig, + }, nil +} + +// configureURLAuth copies the url userinfo into a basic HTTP auth authorization header +func configureURLAuth(service *url.URL, header http.Header) error { + header.Add("Authorization", fmt.Sprintf("Basic %s", encodeBasicAuth(service.User.String()))) + return nil +} + +// configureSASLAuth copies the sasl username and password into a HTTP basic authorization header +func configureSASLAuth(conf *ClientConfig, header http.Header) error { + mech := conf.SaslMechanism + if strings.ToUpper(mech) == "GSSAPI" { + return fmt.Errorf("SASL_INHERIT support PLAIN and SCRAM SASL mechanisms only") + } + + user := conf.SaslUsername + pass := conf.SaslPassword + if user == "" || pass == "" { + return fmt.Errorf("SASL_INHERIT requires both sasl.username and sasl.password be set") + } + + header.Add("Authorization", fmt.Sprintf("Basic %s", encodeBasicAuth(fmt.Sprintf("%s:%s", user, pass)))) + return nil +} + +// configureUSERINFOAuth copies basic.auth.user.info +func configureUSERINFOAuth(conf *ClientConfig, header http.Header) error { + auth := conf.BasicAuthUserInfo + if auth == "" { + return fmt.Errorf("USER_INFO source configured without basic.auth.user.info ") + } + + header.Add("Authorization", fmt.Sprintf("Basic %s", encodeBasicAuth(auth))) + return nil + +} + +func configureStaticTokenAuth(conf *ClientConfig, header http.Header) error { + bearerToken := conf.BearerAuthToken + if len(bearerToken) == 0 { + return fmt.Errorf("config bearer.auth.token must be specified when bearer.auth.credentials.source is" + + " specified with STATIC_TOKEN") + } + header.Add("Authorization", fmt.Sprintf("Bearer %s", bearerToken)) + setBearerAuthExtraHeaders(conf, header) + return nil +} + +func setBearerAuthExtraHeaders(conf *ClientConfig, header http.Header) { + targetIdentityPoolID := conf.BearerAuthIdentityPoolID + if len(targetIdentityPoolID) > 0 { + header.Add(TargetIdentityPoolIDKey, targetIdentityPoolID) + } + + targetSr := conf.BearerAuthLogicalCluster + if len(targetSr) > 0 { + header.Add(TargetSRClusterKey, targetSr) + } +} + +// NewAuthHeader returns a base64 encoded userinfo string identified on the configured credentials source +func 
NewAuthHeader(service *url.URL, conf *ClientConfig) (http.Header, error) { + // Remove userinfo from url regardless of source to avoid confusion/conflicts + defer func() { + service.User = nil + }() + + header := http.Header{} + + basicSource := conf.BasicAuthCredentialsSource + bearerSource := conf.BearerAuthCredentialsSource + + var err error + if len(basicSource) != 0 && len(bearerSource) != 0 { + return header, fmt.Errorf("only one of basic.auth.credentials.source or bearer.auth.credentials.source" + + " may be specified") + } else if len(basicSource) != 0 { + switch strings.ToUpper(basicSource) { + case "URL": + err = configureURLAuth(service, header) + case "SASL_INHERIT": + err = configureSASLAuth(conf, header) + case "USER_INFO": + err = configureUSERINFOAuth(conf, header) + default: + err = fmt.Errorf("unrecognized value for basic.auth.credentials.source %s", basicSource) + } + } else if len(bearerSource) != 0 { + switch strings.ToUpper(bearerSource) { + case "STATIC_TOKEN": + err = configureStaticTokenAuth(conf, header) + //case "OAUTHBEARER": + // err = configureOauthBearerAuth(conf, header) + //case "SASL_OAUTHBEARER_INHERIT": + // err = configureSASLOauth() + //case "CUSTOM": + // err = configureCustomOauth(conf, header) + default: + err = fmt.Errorf("unrecognized value for bearer.auth.credentials.source %s", bearerSource) + } + } + + return header, err +} + +// HandleRequest sends a HTTP(S) request to the Schema Registry, placing results into the response object +func (rs *RestService) HandleRequest(request *API, response interface{}) error { + urlPath := path.Join(rs.url.Path, fmt.Sprintf(request.endpoint, request.arguments...)) + endpoint, err := rs.url.Parse(urlPath) + if err != nil { + return err + } + + var readCloser io.ReadCloser + if request.body != nil { + outbuf, err := json.Marshal(request.body) + if err != nil { + return err + } + readCloser = ioutil.NopCloser(bytes.NewBuffer(outbuf)) + } + + req := &http.Request{ + Method: request.method, + URL: endpoint, + Body: readCloser, + Header: rs.headers, + } + + resp, err := rs.Do(req) + + if err != nil { + return err + } + + defer resp.Body.Close() + if resp.StatusCode == 200 { + if err = json.NewDecoder(resp.Body).Decode(response); err != nil { + return err + } + return nil + } + + var failure rest.Error + if err := json.NewDecoder(resp.Body).Decode(&failure); err != nil { + return err + } + + return &failure +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/mock_schemaregistry_client.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/mock_schemaregistry_client.go new file mode 100644 index 00000000..47ead24b --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/mock_schemaregistry_client.go @@ -0,0 +1,755 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package schemaregistry + +import ( + "errors" + "fmt" + "net/url" + "reflect" + "sort" + "strings" + "sync" + + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/internal" +) + +const noSubject = "" + +type counter struct { + count int +} + +func (c *counter) currentValue() int { + return c.count +} + +func (c *counter) increment() int { + c.count++ + return c.count +} + +type versionCacheEntry struct { + version int + softDeleted bool +} + +type infoCacheEntry struct { + info *SchemaInfo + softDeleted bool +} + +type metadataCacheEntry struct { + metadata *SchemaMetadata + softDeleted bool +} + +/* HTTP(S) Schema Registry Client and schema caches */ +type mockclient struct { + sync.Mutex + config *Config + url *url.URL + infoToSchemaCache map[subjectJSON]metadataCacheEntry + infoToSchemaCacheLock sync.RWMutex + idToSchemaCache map[subjectID]infoCacheEntry + idToSchemaCacheLock sync.RWMutex + schemaToVersionCache map[subjectJSON]versionCacheEntry + schemaToVersionCacheLock sync.RWMutex + configCache map[string]ServerConfig + configCacheLock sync.RWMutex + counter counter +} + +var _ Client = new(mockclient) + +// Fetch all contexts used +// Returns a string slice containing contexts +func (c *mockclient) GetAllContexts() ([]string, error) { + return []string{"."}, nil +} + +// Config returns the client config +func (c *mockclient) Config() *Config { + return c.config +} + +// Register registers Schema aliased with subject +func (c *mockclient) Register(subject string, schema SchemaInfo, normalize bool) (id int, err error) { + metadata, err := c.RegisterFullResponse(subject, schema, normalize) + if err != nil { + return -1, err + } + return metadata.ID, err +} + +// RegisterFullResponse registers Schema aliased with subject +func (c *mockclient) RegisterFullResponse(subject string, schema SchemaInfo, normalize bool) (result SchemaMetadata, err error) { + schemaJSON, err := schema.MarshalJSON() + if err != nil { + return SchemaMetadata{ + ID: -1, + }, err + } + cacheKey := subjectJSON{ + subject: subject, + json: string(schemaJSON), + } + c.infoToSchemaCacheLock.RLock() + cacheEntryVal, ok := c.infoToSchemaCache[cacheKey] + if cacheEntryVal.softDeleted { + ok = false + } + c.infoToSchemaCacheLock.RUnlock() + if ok { + return *cacheEntryVal.metadata, nil + } + + id, err := c.getIDFromRegistry(subject, schema) + if err != nil { + return SchemaMetadata{ + ID: -1, + }, err + } + result = SchemaMetadata{ + SchemaInfo: schema, + ID: id, + } + c.infoToSchemaCacheLock.Lock() + c.infoToSchemaCache[cacheKey] = metadataCacheEntry{&result, false} + c.infoToSchemaCacheLock.Unlock() + return result, nil +} + +func (c *mockclient) getIDFromRegistry(subject string, schema SchemaInfo) (int, error) { + var id = -1 + c.idToSchemaCacheLock.RLock() + for key, value := range c.idToSchemaCache { + if key.subject == subject && schemasEqual(*value.info, schema) { + id = key.id + break + } + } + c.idToSchemaCacheLock.RUnlock() + err := c.generateVersion(subject, schema) + if err != nil { + return -1, err + } + if id < 0 { + id = c.counter.increment() + idCacheKey := subjectID{ + subject: subject, + id: id, + } + c.idToSchemaCacheLock.Lock() + c.idToSchemaCache[idCacheKey] = infoCacheEntry{&schema, false} + c.idToSchemaCacheLock.Unlock() + } + return id, nil +} + +func (c *mockclient) generateVersion(subject string, schema SchemaInfo) error { + versions := c.allVersions(subject) + var newVersion int + if len(versions) == 0 { + newVersion = 1 + } else { + newVersion = versions[len(versions)-1] + 1 + 
} + schemaJSON, err := schema.MarshalJSON() + if err != nil { + return err + } + cacheKey := subjectJSON{ + subject: subject, + json: string(schemaJSON), + } + c.schemaToVersionCacheLock.Lock() + c.schemaToVersionCache[cacheKey] = versionCacheEntry{newVersion, false} + c.schemaToVersionCacheLock.Unlock() + return nil +} + +// GetBySubjectAndID returns the schema identified by id +// Returns Schema object on success +func (c *mockclient) GetBySubjectAndID(subject string, id int) (schema SchemaInfo, err error) { + cacheKey := subjectID{ + subject: subject, + id: id, + } + c.idToSchemaCacheLock.RLock() + cacheEntryValue, ok := c.idToSchemaCache[cacheKey] + c.idToSchemaCacheLock.RUnlock() + if ok { + return *cacheEntryValue.info, nil + } + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.SchemasBySubject, id, url.QueryEscape(subject)), + Err: errors.New("Subject Not Found"), + } + return SchemaInfo{}, &posErr +} + +func (c *mockclient) GetSubjectsAndVersionsByID(id int) (subjectsAndVersions []SubjectAndVersion, err error) { + subjectsAndVersions = make([]SubjectAndVersion, 0) + + c.infoToSchemaCacheLock.RLock() + c.schemaToVersionCacheLock.RLock() + + for key, value := range c.infoToSchemaCache { + if !value.softDeleted && value.metadata.ID == id { + var schemaJSON []byte + schemaJSON, err = value.metadata.SchemaInfo.MarshalJSON() + if err != nil { + return + } + + versionCacheKey := subjectJSON{ + subject: key.subject, + json: string(schemaJSON), + } + + versionEntry, ok := c.schemaToVersionCache[versionCacheKey] + if !ok { + err = fmt.Errorf("entry in version cache not found") + return + } + + subjectsAndVersions = append(subjectsAndVersions, SubjectAndVersion{ + Subject: key.subject, + Version: versionEntry.version, + }) + } + } + + c.schemaToVersionCacheLock.RUnlock() + c.infoToSchemaCacheLock.RUnlock() + + if len(subjectsAndVersions) == 0 { + err = &url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.SubjectsAndVersionsByID, id), + Err: errors.New("schema ID not found"), + } + } + + sort.Slice(subjectsAndVersions, func(i, j int) bool { + return subjectsAndVersions[i].Subject < subjectsAndVersions[j].Subject + }) + return +} + +// GetID checks if a schema has been registered with the subject. 
Returns ID if the registration can be found +func (c *mockclient) GetID(subject string, schema SchemaInfo, normalize bool) (id int, err error) { + schemaJSON, err := schema.MarshalJSON() + if err != nil { + return -1, err + } + cacheKey := subjectJSON{ + subject: subject, + json: string(schemaJSON), + } + c.infoToSchemaCacheLock.RLock() + cacheEntryVal, ok := c.infoToSchemaCache[cacheKey] + if cacheEntryVal.softDeleted { + ok = false + } + c.infoToSchemaCacheLock.RUnlock() + if ok { + return cacheEntryVal.metadata.ID, nil + } + + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.Subjects, url.PathEscape(subject)), + Err: errors.New("Subject Not found"), + } + return -1, &posErr +} + +// GetLatestSchemaMetadata fetches latest version registered with the provided subject +// Returns SchemaMetadata object +func (c *mockclient) GetLatestSchemaMetadata(subject string) (result SchemaMetadata, err error) { + version := c.latestVersion(subject) + if version < 0 { + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.Versions, url.PathEscape(subject), "latest"), + Err: errors.New("Subject Not found"), + } + return SchemaMetadata{}, &posErr + } + return c.GetSchemaMetadata(subject, version) +} + +// GetSchemaMetadata fetches the requested subject schema identified by version +// Returns SchemaMetadata object +func (c *mockclient) GetSchemaMetadata(subject string, version int) (result SchemaMetadata, err error) { + return c.GetSchemaMetadataIncludeDeleted(subject, version, false) +} + +// GetSchemaMetadataIncludeDeleted fetches the requested subject schema identified by version and deleted flag +// Returns SchemaMetadata object +func (c *mockclient) GetSchemaMetadataIncludeDeleted(subject string, version int, deleted bool) (result SchemaMetadata, err error) { + var json string + c.schemaToVersionCacheLock.RLock() + for key, value := range c.schemaToVersionCache { + if key.subject == subject && value.version == version && (!value.softDeleted || deleted) { + json = key.json + break + } + } + c.schemaToVersionCacheLock.RUnlock() + if json == "" { + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.Versions, url.PathEscape(subject), version), + Err: errors.New("Subject Not found"), + } + return SchemaMetadata{}, &posErr + } + + var info SchemaInfo + err = info.UnmarshalJSON([]byte(json)) + if err != nil { + return SchemaMetadata{}, err + } + var id = -1 + c.idToSchemaCacheLock.RLock() + for key, value := range c.idToSchemaCache { + if key.subject == subject && schemasEqual(*value.info, info) && (!value.softDeleted || deleted) { + id = key.id + break + } + } + c.idToSchemaCacheLock.RUnlock() + if id == -1 { + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.Versions, url.PathEscape(subject), version), + Err: errors.New("Subject Not found"), + } + return SchemaMetadata{}, &posErr + } + return SchemaMetadata{ + SchemaInfo: info, + + ID: id, + Subject: subject, + Version: version, + }, nil +} + +// GetLatestWithMetadata fetches the latest subject schema with the given metadata +// Returns SchemaMetadata object +func (c *mockclient) GetLatestWithMetadata(subject string, metadata map[string]string, deleted bool) (result SchemaMetadata, err error) { + sb := strings.Builder{} + for key, value := range metadata { + _, _ = sb.WriteString("&key=") + _, _ = sb.WriteString(key) + _, _ = sb.WriteString("&value=") + _, _ = sb.WriteString(value) + } + metadataStr := sb.String() + var results 
[]SchemaMetadata + c.schemaToVersionCacheLock.RLock() + for key, value := range c.schemaToVersionCache { + if key.subject == subject && (!value.softDeleted || deleted) { + var info SchemaInfo + err = info.UnmarshalJSON([]byte(key.json)) + if err != nil { + return SchemaMetadata{}, err + } + if info.Metadata != nil && isSubset(metadata, info.Metadata.Properties) { + results = append(results, SchemaMetadata{ + SchemaInfo: info, + Subject: subject, + Version: value.version, + }) + } + } + } + result.Version = 0 + for _, schema := range results { + if schema.Version > result.Version { + result = schema + } + } + c.schemaToVersionCacheLock.RUnlock() + if result.Version <= 0 { + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.LatestWithMetadata, url.PathEscape(subject), deleted, metadataStr), + Err: errors.New("Subject Not found"), + } + return SchemaMetadata{}, &posErr + } + + result.ID = -1 + c.idToSchemaCacheLock.RLock() + for key, value := range c.idToSchemaCache { + if key.subject == subject && schemasEqual(*value.info, result.SchemaInfo) && (!value.softDeleted || deleted) { + result.ID = key.id + break + } + } + c.idToSchemaCacheLock.RUnlock() + if result.ID < 0 { + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.LatestWithMetadata, url.PathEscape(subject), deleted, metadataStr), + Err: errors.New("Subject Not found"), + } + return SchemaMetadata{}, &posErr + } + return result, nil +} + +func isSubset(containee map[string]string, container map[string]string) bool { + for key, value := range containee { + if container[key] != value { + return false + } + } + return true +} + +// GetAllVersions fetches a list of all version numbers associated with the provided subject registration +// Returns integer slice on success +func (c *mockclient) GetAllVersions(subject string) (results []int, err error) { + results = c.allVersions(subject) + if len(results) == 0 { + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.Version, url.PathEscape(subject)), + Err: errors.New("Subject Not Found"), + } + return nil, &posErr + } + return results, err +} + +func (c *mockclient) allVersions(subject string) (results []int) { + versions := make([]int, 0) + c.schemaToVersionCacheLock.RLock() + for key, value := range c.schemaToVersionCache { + if key.subject == subject && !value.softDeleted { + versions = append(versions, value.version) + } + } + c.schemaToVersionCacheLock.RUnlock() + sort.Ints(versions) + return versions +} + +func (c *mockclient) latestVersion(subject string) int { + versions := c.allVersions(subject) + if len(versions) == 0 { + return -1 + } + return versions[len(versions)-1] +} + +func (c *mockclient) deleteVersion(key subjectJSON, version int, permanent bool) { + if permanent { + delete(c.schemaToVersionCache, key) + } else { + c.schemaToVersionCache[key] = versionCacheEntry{version, true} + } +} + +func (c *mockclient) deleteInfo(key subjectID, info *SchemaInfo, permanent bool) { + if permanent { + delete(c.idToSchemaCache, key) + } else { + c.idToSchemaCache[key] = infoCacheEntry{info, true} + } +} + +func (c *mockclient) deleteMetadata(key subjectJSON, metadata *SchemaMetadata, permanent bool) { + if permanent { + delete(c.infoToSchemaCache, key) + } else { + c.infoToSchemaCache[key] = metadataCacheEntry{metadata, true} + } +} + +// GetVersion finds the Subject SchemaMetadata associated with the provided schema +// Returns integer SchemaMetadata number +func (c *mockclient) GetVersion(subject string, 
schema SchemaInfo, normalize bool) (int, error) { + schemaJSON, err := schema.MarshalJSON() + if err != nil { + return -1, err + } + cacheKey := subjectJSON{ + subject: subject, + json: string(schemaJSON), + } + c.schemaToVersionCacheLock.RLock() + cacheEntryVal, ok := c.schemaToVersionCache[cacheKey] + if cacheEntryVal.softDeleted { + ok = false + } + c.schemaToVersionCacheLock.RUnlock() + if ok { + return cacheEntryVal.version, nil + } + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.Subjects, url.PathEscape(subject)), + Err: errors.New("Subject Not Found"), + } + return -1, &posErr +} + +// Fetch all Subjects registered with the schema Registry +// Returns a string slice containing all registered subjects +func (c *mockclient) GetAllSubjects() ([]string, error) { + subjects := make([]string, 0) + c.schemaToVersionCacheLock.RLock() + for key, value := range c.schemaToVersionCache { + if !value.softDeleted { + subjects = append(subjects, key.subject) + } + } + c.schemaToVersionCacheLock.RUnlock() + sort.Strings(subjects) + return subjects, nil +} + +// Deletes provided Subject from registry +// Returns integer slice of versions removed by delete +func (c *mockclient) DeleteSubject(subject string, permanent bool) (deleted []int, err error) { + c.infoToSchemaCacheLock.Lock() + for key, value := range c.infoToSchemaCache { + if key.subject == subject && (!value.softDeleted || permanent) { + c.deleteMetadata(key, value.metadata, permanent) + } + } + c.infoToSchemaCacheLock.Unlock() + c.schemaToVersionCacheLock.Lock() + for key, value := range c.schemaToVersionCache { + if key.subject == subject && (!value.softDeleted || permanent) { + c.deleteVersion(key, value.version, permanent) + deleted = append(deleted, value.version) + } + } + c.schemaToVersionCacheLock.Unlock() + c.configCacheLock.Lock() + delete(c.configCache, subject) + c.configCacheLock.Unlock() + if permanent { + c.idToSchemaCacheLock.Lock() + for key, value := range c.idToSchemaCache { + if key.subject == subject && (!value.softDeleted || permanent) { + c.deleteInfo(key, value.info, permanent) + } + } + c.idToSchemaCacheLock.Unlock() + } + return deleted, nil +} + +// DeleteSubjectVersion removes the version identified by delete from the subject's registration +// Returns integer id for the deleted version +func (c *mockclient) DeleteSubjectVersion(subject string, version int, permanent bool) (deleted int, err error) { + c.schemaToVersionCacheLock.Lock() + for key, value := range c.schemaToVersionCache { + if key.subject == subject && value.version == version { + c.deleteVersion(key, value.version, permanent) + schemaJSON := key.json + cacheKeySchema := subjectJSON{ + subject: subject, + json: schemaJSON, + } + c.infoToSchemaCacheLock.Lock() + infoSchemaEntryVal, ok := c.infoToSchemaCache[cacheKeySchema] + if ok { + c.deleteMetadata(key, infoSchemaEntryVal.metadata, permanent) + } + c.infoToSchemaCacheLock.Unlock() + if permanent && ok { + cacheKeyID := subjectID{ + subject: subject, + id: infoSchemaEntryVal.metadata.ID, + } + c.idToSchemaCacheLock.Lock() + idSchemaEntryVal, ok := c.idToSchemaCache[cacheKeyID] + if ok { + c.deleteInfo(cacheKeyID, idSchemaEntryVal.info, permanent) + } + c.idToSchemaCacheLock.Unlock() + } + } + } + c.schemaToVersionCacheLock.Unlock() + return version, nil +} + +// TestSubjectCompatibility verifies schema against all schemas in the subject +// Returns true if the schema is compatible, false otherwise +func (c *mockclient) TestSubjectCompatibility(subject string, schema 
SchemaInfo) (ok bool, err error) { + return false, errors.New("unsupported operation") +} + +// TestCompatibility verifies schema against the subject's compatibility policy +// Returns true if the schema is compatible, false otherwise +func (c *mockclient) TestCompatibility(subject string, version int, schema SchemaInfo) (ok bool, err error) { + return false, errors.New("unsupported operation") +} + +// Fetch compatibility level currently configured for provided subject +// Returns compatibility level string upon success +func (c *mockclient) GetCompatibility(subject string) (compatibility Compatibility, err error) { + c.configCacheLock.RLock() + result, ok := c.configCache[subject] + c.configCacheLock.RUnlock() + if !ok { + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.SubjectConfig, url.PathEscape(subject)), + Err: errors.New("Subject Not Found"), + } + return compatibility, &posErr + } + return result.CompatibilityLevel, nil +} + +// UpdateCompatibility updates subject's compatibility level +// Returns new compatibility level string upon success +func (c *mockclient) UpdateCompatibility(subject string, update Compatibility) (compatibility Compatibility, err error) { + c.configCacheLock.Lock() + c.configCache[subject] = ServerConfig{ + CompatibilityLevel: update, + } + c.configCacheLock.Unlock() + return update, nil +} + +// GetDefaultCompatibility fetches the global(default) compatibility level +// Returns global(default) compatibility level +func (c *mockclient) GetDefaultCompatibility() (compatibility Compatibility, err error) { + c.configCacheLock.RLock() + result, ok := c.configCache[noSubject] + c.configCacheLock.RUnlock() + if !ok { + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprint(internal.Config), + Err: errors.New("Subject Not Found"), + } + return compatibility, &posErr + } + return result.CompatibilityLevel, nil +} + +// UpdateDefaultCompatibility updates the global(default) compatibility level +// Returns new compatibility level string +func (c *mockclient) UpdateDefaultCompatibility(update Compatibility) (compatibility Compatibility, err error) { + c.configCacheLock.Lock() + c.configCache[noSubject] = ServerConfig{ + CompatibilityLevel: update, + } + c.configCacheLock.Unlock() + return update, nil +} + +// Fetch config currently configured for provided subject +// Returns config string upon success +func (c *mockclient) GetConfig(subject string, defaultToGlobal bool) (result ServerConfig, err error) { + c.configCacheLock.RLock() + result, ok := c.configCache[subject] + c.configCacheLock.RUnlock() + if !ok { + if !defaultToGlobal { + posErr := url.Error{ + Op: "GET", + URL: c.url.String() + fmt.Sprintf(internal.SubjectConfigDefault, url.PathEscape(subject), defaultToGlobal), + Err: errors.New("Subject Not Found"), + } + return result, &posErr + } + return c.GetDefaultConfig() + } + return result, nil +} + +// UpdateConfig updates subject's config +// Returns new config string upon success +func (c *mockclient) UpdateConfig(subject string, update ServerConfig) (result ServerConfig, err error) { + c.configCacheLock.Lock() + c.configCache[subject] = update + c.configCacheLock.Unlock() + return update, nil +} + +// GetDefaultConfig fetches the global(default) config +// Returns global(default) config +func (c *mockclient) GetDefaultConfig() (result ServerConfig, err error) { + c.configCacheLock.RLock() + result, ok := c.configCache[noSubject] + c.configCacheLock.RUnlock() + if !ok { + posErr := url.Error{ + Op:
"GET", + URL: c.url.String() + fmt.Sprint(internal.Config), + Err: errors.New("Subject Not Found"), + } + return result, &posErr + } + return result, nil +} + +// UpdateDefaultCompatibility updates the global(default) config +// Returns new string config +func (c *mockclient) UpdateDefaultConfig(update ServerConfig) (result ServerConfig, err error) { + c.configCacheLock.Lock() + c.configCache[noSubject] = update + c.configCacheLock.Unlock() + return update, nil +} + +// ClearLatestCaches clears caches of latest versions +func (c *mockclient) ClearLatestCaches() error { + return nil +} + +// ClearCaches clears all caches +func (c *mockclient) ClearCaches() error { + return nil +} + +// Close closes the client +func (c *mockclient) Close() error { + return nil +} + +func schemasEqual(info1 SchemaInfo, info2 SchemaInfo) bool { + refs1 := info1.References + if refs1 == nil { + refs1 = make([]Reference, 0) + } + refs2 := info2.References + if refs2 == nil { + refs2 = make([]Reference, 0) + } + return info1.Schema == info2.Schema && + info1.SchemaType == info2.SchemaType && + reflect.DeepEqual(refs1, refs2) && + reflect.DeepEqual(info1.Metadata, info2.Metadata) && + reflect.DeepEqual(info1.RuleSet, info2.RuleSet) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/rest/rest_error.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/rest/rest_error.go new file mode 100644 index 00000000..3d72a92b --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/rest/rest_error.go @@ -0,0 +1,16 @@ +package rest + +import ( + "fmt" +) + +// Error represents a Schema Registry HTTP Error response +type Error struct { + Code int `json:"error_code"` + Message string `json:"message"` +} + +// Error implements the errors.Error interface +func (err *Error) Error() string { + return fmt.Sprintf("schema registry request failed error code: %d: %s", err.Code, err.Message) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/schemaregistry_client.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/schemaregistry_client.go new file mode 100644 index 00000000..2a62534b --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/schemaregistry_client.go @@ -0,0 +1,1101 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package schemaregistry + +import ( + "encoding/json" + "fmt" + "net/url" + "runtime" + "strings" + "sync" + "time" + + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache" + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/internal" +) + +/* Schema Registry API endpoints +* +* ====Contexts==== +* Fetch JSON array str:context of all contexts +* -GET /contexts returns: JSON array string: contexts; raises: 500[01] +* +* ====Schemas==== +* Fetch string: schema(escaped) identified by the input id. 
+* -GET /schemas/ids/{int: id} returns: JSON blob: schema; raises: 404[03], 500[01] +* Fetch string: JSON array (subject, version) of schemas identified by ID. +* -GET /schemas/ids/{int: id}/versions returns: JSON array; raises: 404[03], 500[01] +* +* ====Subjects==== +* Fetch JSON array str:subject of all registered subjects +* -GET /subjects returns: JSON array string: subjects; raises: 500[01] +* Fetch JSON array int:versions +* GET /subjects/{string: subject}/versions returns: JSON array of int: versions; raises: 404[01], 500[01] +* +* GET /subjects/{string: subject}/versions/{int|string('latest'): version} returns: JSON blob *schemaMetadata*; raises: 404[01, 02], 422[02], 500[01] +* GET /subjects/{string: subject}/versions/{int|string('latest'): version}/schema returns: JSON blob: schema(unescaped); raises: 404, 422, 500[01, 02, 03] +* +* Delete subject and its associated subject configuration subjectConfig +* -DELETE /subjects/{string: subject} returns: JSON array int: version; raises: 404[01], 500[01] +* Delete subject version +* -DELETE /subjects/{string: subject}/versions/{int|str('latest'): version} returns int: deleted version id; raises: 404[01, 02] +* +* Register new schema under subject +* -POST /subjects/{string: subject}/versions returns JSON blob ; raises: 409, 422[01], 500[01, 02, 03] +* Return SchemaMetadata for the subject version (if any) associated with the schema in the request body +* -POST /subjects/{string: subject} returns JSON *schemaMetadata*; raises: 404[01, 03] +* +* ====Compatibility==== +* Test schema (http body) against configured compatibility for subject version +* -POST /compatibility/subjects/{string: subject}/versions/{int|string('latest'): version} returns: JSON bool:is_compatible; raises: 404[01,02], 422[01,02], 500[01] +* +* ====SerializerConfig==== +* Returns global configuration +* -GET /config returns: JSON string:compatibility; raises: 500[01] +* Update global SR config +* -PUT /config returns: JSON string:compatibility; raises: 422[03], 500[01, 03] +* Update subject level subjectConfig +* -PUT /config/{string: subject} returns: JSON string:compatibility; raises: 422[03], 500[01,03] +* Returns compatibility level of subject +* GET /config/{string: subject} returns: JSON string:compatibility; raises: 404, 500[01] + */ + +// Rule represents a data contract rule +type Rule struct { + // Rule name + Name string `json:"name,omitempty"` + // Rule doc + Doc string `json:"doc,omitempty"` + // Rule kind + Kind string `json:"kind,omitempty"` + // Rule mode + Mode string `json:"mode,omitempty"` + // Rule type + Type string `json:"type,omitempty"` + // The tags to which this rule applies + Tags []string `json:"tags,omitempty"` + // Optional params for the rule + Params map[string]string `json:"params,omitempty"` + // Rule expression + Expr string `json:"expr,omitempty"` + // Rule action on success + OnSuccess string `json:"onSuccess,omitempty"` + // Rule action on failure + OnFailure string `json:"onFailure,omitempty"` + // Whether the rule is disabled + Disabled bool `json:"disabled,omitempty"` +} + +// RuleMode represents the rule mode +type RuleMode = int + +const ( + // Upgrade denotes upgrade mode + Upgrade = 1 + // Downgrade denotes downgrade mode + Downgrade = 2 + // UpDown denotes upgrade/downgrade mode + UpDown = 3 + // Write denotes write mode + Write = 4 + // Read denotes read mode + Read = 5 + // WriteRead denotes write/read mode + WriteRead = 6 +) + +var modes = map[string]RuleMode{ + "UPGRADE": Upgrade, + "DOWNGRADE": Downgrade, +
"UPDOWN": UpDown, + "WRITE": Write, + "READ": Read, + "WRITEREAD": WriteRead, +} + +// ParseMode parses the given rule mode +func ParseMode(mode string) (RuleMode, bool) { + c, ok := modes[strings.ToUpper(mode)] + return c, ok +} + +// RuleSet represents a data contract rule set +type RuleSet struct { + MigrationRules []Rule `json:"migrationRules,omitempty"` + DomainRules []Rule `json:"domainRules,omitempty"` +} + +// HasRules checks if the ruleset has rules for the given mode +func (r *RuleSet) HasRules(mode RuleMode) bool { + switch mode { + case Upgrade, Downgrade: + return r.hasRules(r.MigrationRules, func(ruleMode RuleMode) bool { + return ruleMode == mode || ruleMode == UpDown + }) + case UpDown: + return r.hasRules(r.MigrationRules, func(ruleMode RuleMode) bool { + return ruleMode == mode + }) + case Write, Read: + return r.hasRules(r.DomainRules, func(ruleMode RuleMode) bool { + return ruleMode == mode || ruleMode == WriteRead + }) + case WriteRead: + return r.hasRules(r.DomainRules, func(ruleMode RuleMode) bool { + return ruleMode == mode + }) + } + return false +} + +func (r *RuleSet) hasRules(rules []Rule, filter func(RuleMode) bool) bool { + for _, rule := range rules { + ruleMode, ok := ParseMode(rule.Mode) + if ok && filter(ruleMode) { + return true + } + } + return false +} + +// Metadata represents user-defined metadata +type Metadata struct { + Tags map[string][]string `json:"tags,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + Sensitive []string `json:"sensitive,omitempty"` +} + +// Reference represents a schema reference +type Reference struct { + Name string `json:"name"` + Subject string `json:"subject"` + Version int `json:"version"` +} + +// SchemaInfo represents basic schema information +type SchemaInfo struct { + Schema string `json:"schema,omitempty"` + SchemaType string `json:"schemaType,omitempty"` + References []Reference `json:"references,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` + RuleSet *RuleSet `json:"ruleSet,omitempty"` +} + +// SubjectAndVersion represents a pair of subject and version +type SubjectAndVersion struct { + Subject string `json:"subject,omitempty"` + Version int `json:"version,omitempty"` +} + +// MarshalJSON implements the json.Marshaler interface +func (sd *SchemaInfo) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Schema string `json:"schema,omitempty"` + SchemaType string `json:"schemaType,omitempty"` + References []Reference `json:"references,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` + RuleSet *RuleSet `json:"ruleSet,omitempty"` + }{ + sd.Schema, + sd.SchemaType, + sd.References, + sd.Metadata, + sd.RuleSet, + }) +} + +// UnmarshalJSON implements the json.Unmarshaller interface +func (sd *SchemaInfo) UnmarshalJSON(b []byte) error { + var err error + var tmp struct { + Schema string `json:"schema,omitempty"` + SchemaType string `json:"schemaType,omitempty"` + References []Reference `json:"references,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` + RuleSet *RuleSet `json:"ruleSet,omitempty"` + } + + err = json.Unmarshal(b, &tmp) + + sd.Schema = tmp.Schema + sd.SchemaType = tmp.SchemaType + sd.References = tmp.References + sd.Metadata = tmp.Metadata + sd.RuleSet = tmp.RuleSet + + return err +} + +// SchemaMetadata represents schema metadata +type SchemaMetadata struct { + SchemaInfo + ID int `json:"id,omitempty"` + Subject string `json:"subject,omitempty"` + Version int `json:"version,omitempty"` +} + +// MarshalJSON implements the 
json.Marshaler interface +func (sd *SchemaMetadata) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Schema string `json:"schema,omitempty"` + SchemaType string `json:"schemaType,omitempty"` + References []Reference `json:"references,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` + RuleSet *RuleSet `json:"ruleSet,omitempty"` + ID int `json:"id,omitempty"` + Subject string `json:"subject,omitempty"` + Version int `json:"version,omitempty"` + }{ + sd.Schema, + sd.SchemaType, + sd.References, + sd.Metadata, + sd.RuleSet, + sd.ID, + sd.Subject, + sd.Version, + }) +} + +// UnmarshalJSON implements the json.Unmarshaller interface +func (sd *SchemaMetadata) UnmarshalJSON(b []byte) error { + var err error + var tmp struct { + Schema string `json:"schema,omitempty"` + SchemaType string `json:"schemaType,omitempty"` + References []Reference `json:"references,omitempty"` + Metadata *Metadata `json:"metadata,omitempty"` + RuleSet *RuleSet `json:"ruleSet,omitempty"` + ID int `json:"id,omitempty"` + Subject string `json:"subject,omitempty"` + Version int `json:"version,omitempty"` + } + + err = json.Unmarshal(b, &tmp) + + sd.Schema = tmp.Schema + sd.SchemaType = tmp.SchemaType + sd.References = tmp.References + sd.Metadata = tmp.Metadata + sd.RuleSet = tmp.RuleSet + sd.ID = tmp.ID + sd.Subject = tmp.Subject + sd.Version = tmp.Version + + return err +} + +// ServerConfig represents config params for Schema Registry +/* NOTE: GET uses compatibilityLevel, POST uses compatibility */ +type ServerConfig struct { + Alias string `json:"alias,omitempty"` + Normalize bool `json:"normalize,omitempty"` + CompatibilityUpdate Compatibility `json:"compatibility,omitempty"` + CompatibilityLevel Compatibility `json:"compatibilityLevel,omitempty"` + CompatibilityGroup string `json:"compatibilityGroup,omitempty"` + DefaultMetadata *Metadata `json:"defaultMetadata,omitempty"` + OverrideMetadata *Metadata `json:"overrideMetadata,omitempty"` + DefaultRuleSet *RuleSet `json:"defaultRuleSet,omitempty"` + OverrideRuleSet *RuleSet `json:"overrideRuleSet,omitempty"` +} + +type subjectJSON struct { + subject string + json string +} + +type subjectID struct { + subject string + id int +} + +type subjectVersion struct { + subject string + version int + deleted bool +} + +type subjectMetadata struct { + subject string + metadata string + deleted bool +} + +/* HTTP(S) Schema Registry Client and schema caches */ +type client struct { + sync.Mutex + config *Config + restService *internal.RestService + infoToSchemaCache cache.Cache + infoToSchemaCacheLock sync.RWMutex + idToSchemaInfoCache cache.Cache + idToSchemaInfoCacheLock sync.RWMutex + schemaToVersionCache cache.Cache + schemaToVersionCacheLock sync.RWMutex + versionToSchemaCache cache.Cache + versionToSchemaCacheLock sync.RWMutex + latestToSchemaCache cache.Cache + latestToSchemaCacheLock sync.RWMutex + metadataToSchemaCache cache.Cache + metadataToSchemaCacheLock sync.RWMutex + evictor *evictor +} + +var _ Client = new(client) + +// Client is an interface for clients interacting with the Confluent Schema Registry. 
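+// A minimal usage sketch (illustrative only: the "mock://" URL and "orders-value" subject are examples, and NewConfig is this package's config constructor): +// client, err := schemaregistry.NewClient(schemaregistry.NewConfig("mock://")) +// if err != nil { /* handle error */ } +// id, err := client.Register("orders-value", schemaregistry.SchemaInfo{Schema: `"string"`}, false) +//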
+// The Schema Registry's REST interface is further explained in Confluent's Schema Registry API documentation +// https://github.com/confluentinc/schema-registry/blob/master/client/src/main/java/io/confluent/kafka/schemaregistry/client/SchemaRegistryClient.java +type Client interface { + Config() *Config + GetAllContexts() ([]string, error) + Register(subject string, schema SchemaInfo, normalize bool) (id int, err error) + RegisterFullResponse(subject string, schema SchemaInfo, normalize bool) (result SchemaMetadata, err error) + GetBySubjectAndID(subject string, id int) (schema SchemaInfo, err error) + GetSubjectsAndVersionsByID(id int) (subjectAndVersion []SubjectAndVersion, err error) + GetID(subject string, schema SchemaInfo, normalize bool) (id int, err error) + GetLatestSchemaMetadata(subject string) (SchemaMetadata, error) + GetSchemaMetadata(subject string, version int) (SchemaMetadata, error) + GetSchemaMetadataIncludeDeleted(subject string, version int, deleted bool) (SchemaMetadata, error) + GetLatestWithMetadata(subject string, metadata map[string]string, deleted bool) (SchemaMetadata, error) + GetAllVersions(subject string) ([]int, error) + GetVersion(subject string, schema SchemaInfo, normalize bool) (version int, err error) + GetAllSubjects() ([]string, error) + DeleteSubject(subject string, permanent bool) ([]int, error) + DeleteSubjectVersion(subject string, version int, permanent bool) (deletes int, err error) + TestSubjectCompatibility(subject string, schema SchemaInfo) (compatible bool, err error) + TestCompatibility(subject string, version int, schema SchemaInfo) (compatible bool, err error) + GetCompatibility(subject string) (compatibility Compatibility, err error) + UpdateCompatibility(subject string, update Compatibility) (compatibility Compatibility, err error) + GetDefaultCompatibility() (compatibility Compatibility, err error) + UpdateDefaultCompatibility(update Compatibility) (compatibility Compatibility, err error) + GetConfig(subject string, defaultToGlobal bool) (result ServerConfig, err error) + UpdateConfig(subject string, update ServerConfig) (result ServerConfig, err error) + GetDefaultConfig() (result ServerConfig, err error) + UpdateDefaultConfig(update ServerConfig) (result ServerConfig, err error) + ClearLatestCaches() error + ClearCaches() error + Close() error +} + +// NewClient returns a Client implementation +func NewClient(conf *Config) (Client, error) { + + urlConf := conf.SchemaRegistryURL + // for testing + if strings.HasPrefix(urlConf, "mock://") { + url, err := url.Parse(urlConf) + if err != nil { + return nil, err + } + mock := &mockclient{ + config: conf, + url: url, + infoToSchemaCache: make(map[subjectJSON]metadataCacheEntry), + idToSchemaCache: make(map[subjectID]infoCacheEntry), + schemaToVersionCache: make(map[subjectJSON]versionCacheEntry), + configCache: make(map[string]ServerConfig), + } + return mock, nil + } + + restService, err := internal.NewRestService(&conf.ClientConfig) + if err != nil { + return nil, err + } + + var schemaToIDCache cache.Cache + var idToSchemaCache cache.Cache + var schemaToVersionCache cache.Cache + var versionToSchemaCache cache.Cache + var latestToSchemaCache cache.Cache + var metadataToSchemaCache cache.Cache + if conf.CacheCapacity != 0 { + schemaToIDCache, err = cache.NewLRUCache(conf.CacheCapacity) + if err != nil { + return nil, err + } + idToSchemaCache, err = cache.NewLRUCache(conf.CacheCapacity) + if err != nil { + return nil, err + } + schemaToVersionCache, err = 
cache.NewLRUCache(conf.CacheCapacity) + if err != nil { + return nil, err + } + versionToSchemaCache, err = cache.NewLRUCache(conf.CacheCapacity) + if err != nil { + return nil, err + } + latestToSchemaCache, err = cache.NewLRUCache(conf.CacheCapacity) + if err != nil { + return nil, err + } + metadataToSchemaCache, err = cache.NewLRUCache(conf.CacheCapacity) + if err != nil { + return nil, err + } + } else { + schemaToIDCache = cache.NewMapCache() + idToSchemaCache = cache.NewMapCache() + schemaToVersionCache = cache.NewMapCache() + versionToSchemaCache = cache.NewMapCache() + latestToSchemaCache = cache.NewMapCache() + metadataToSchemaCache = cache.NewMapCache() + } + handle := &client{ + config: conf, + restService: restService, + infoToSchemaCache: schemaToIDCache, + idToSchemaInfoCache: idToSchemaCache, + schemaToVersionCache: schemaToVersionCache, + versionToSchemaCache: versionToSchemaCache, + latestToSchemaCache: latestToSchemaCache, + metadataToSchemaCache: metadataToSchemaCache, + } + if conf.CacheLatestTTLSecs > 0 { + runEvictor(handle, time.Duration(conf.CacheLatestTTLSecs)*time.Second) + runtime.SetFinalizer(handle, stopEvictor) + } + return handle, nil +} + +// Returns a string slice containing all available contexts +func (c *client) GetAllContexts() ([]string, error) { + var result []string + err := c.restService.HandleRequest(internal.NewRequest("GET", internal.Contexts, nil), &result) + + return result, err +} + +// Config returns the client config +func (c *client) Config() *Config { + return c.config +} + +// Register registers Schema aliased with subject +func (c *client) Register(subject string, schema SchemaInfo, normalize bool) (id int, err error) { + metadata, err := c.RegisterFullResponse(subject, schema, normalize) + if err != nil { + return -1, err + } + return metadata.ID, err +} + +// RegisterFullResponse registers Schema aliased with subject +func (c *client) RegisterFullResponse(subject string, schema SchemaInfo, normalize bool) (result SchemaMetadata, err error) { + schemaJSON, err := schema.MarshalJSON() + if err != nil { + return SchemaMetadata{ + ID: -1, + }, err + } + cacheKey := subjectJSON{ + subject: subject, + json: string(schemaJSON), + } + c.infoToSchemaCacheLock.RLock() + metadataValue, ok := c.infoToSchemaCache.Get(cacheKey) + c.infoToSchemaCacheLock.RUnlock() + if ok { + return *metadataValue.(*SchemaMetadata), nil + } + + input := SchemaMetadata{ + SchemaInfo: schema, + } + c.infoToSchemaCacheLock.Lock() + // another goroutine could have already put it in cache + metadataValue, ok = c.infoToSchemaCache.Get(cacheKey) + if !ok { + err = c.restService.HandleRequest(internal.NewRequest("POST", internal.VersionNormalize, &input, url.PathEscape(subject), normalize), &result) + if err == nil { + c.infoToSchemaCache.Put(cacheKey, &result) + } else { + result = SchemaMetadata{ + ID: -1, + } + } + } else { + result = *metadataValue.(*SchemaMetadata) + } + c.infoToSchemaCacheLock.Unlock() + return result, err +} + +// GetBySubjectAndID returns the schema identified by id +// Returns Schema object on success +func (c *client) GetBySubjectAndID(subject string, id int) (schema SchemaInfo, err error) { + cacheKey := subjectID{ + subject: subject, + id: id, + } + c.idToSchemaInfoCacheLock.RLock() + infoValue, ok := c.idToSchemaInfoCache.Get(cacheKey) + c.idToSchemaInfoCacheLock.RUnlock() + if ok { + return *infoValue.(*SchemaInfo), nil + } + + metadata := SchemaMetadata{} + newInfo := &SchemaInfo{} + c.idToSchemaInfoCacheLock.Lock() + // another goroutine 
could have already put it in cache + infoValue, ok = c.idToSchemaInfoCache.Get(cacheKey) + if !ok { + if len(subject) > 0 { + err = c.restService.HandleRequest(internal.NewRequest("GET", internal.SchemasBySubject, nil, id, url.QueryEscape(subject)), &metadata) + } else { + err = c.restService.HandleRequest(internal.NewRequest("GET", internal.Schemas, nil, id), &metadata) + } + if err == nil { + newInfo = &metadata.SchemaInfo + c.idToSchemaInfoCache.Put(cacheKey, newInfo) + } + } else { + newInfo = infoValue.(*SchemaInfo) + } + c.idToSchemaInfoCacheLock.Unlock() + return *newInfo, err +} + +// GetSubjectsAndVersionsByID returns the subject-version pairs for a given ID. +// Returns SubjectAndVersion object on success. +// This method cannot use caching to increase performance. +func (c *client) GetSubjectsAndVersionsByID(id int) (subjectsAndVersions []SubjectAndVersion, err error) { + err = c.restService.HandleRequest(internal.NewRequest("GET", internal.SubjectsAndVersionsByID, nil, id), &subjectsAndVersions) + return +} + +// GetID checks if a schema has been registered with the subject. Returns ID if the registration can be found +func (c *client) GetID(subject string, schema SchemaInfo, normalize bool) (id int, err error) { + schemaJSON, err := schema.MarshalJSON() + if err != nil { + return -1, err + } + cacheKey := subjectJSON{ + subject: subject, + json: string(schemaJSON), + } + c.infoToSchemaCacheLock.RLock() + metadataValue, ok := c.infoToSchemaCache.Get(cacheKey) + c.infoToSchemaCacheLock.RUnlock() + if ok { + md := *metadataValue.(*SchemaMetadata) + return md.ID, nil + } + + metadata := SchemaMetadata{ + SchemaInfo: schema, + } + c.infoToSchemaCacheLock.Lock() + // another goroutine could have already put it in cache + metadataValue, ok = c.infoToSchemaCache.Get(cacheKey) + if !ok { + err = c.restService.HandleRequest(internal.NewRequest("POST", internal.SubjectsNormalize, &metadata, url.PathEscape(subject), normalize), &metadata) + if err == nil { + c.infoToSchemaCache.Put(cacheKey, &metadata) + } else { + metadata.ID = -1 + } + } else { + md := *metadataValue.(*SchemaMetadata) + metadata.ID = md.ID + } + c.infoToSchemaCacheLock.Unlock() + return metadata.ID, err +} + +// GetLatestSchemaMetadata fetches latest version registered with the provided subject +// Returns SchemaMetadata object +func (c *client) GetLatestSchemaMetadata(subject string) (result SchemaMetadata, err error) { + c.latestToSchemaCacheLock.RLock() + metadataValue, ok := c.latestToSchemaCache.Get(subject) + c.latestToSchemaCacheLock.RUnlock() + if ok { + return *metadataValue.(*SchemaMetadata), nil + } + + c.latestToSchemaCacheLock.Lock() + // another goroutine could have already put it in cache + metadataValue, ok = c.latestToSchemaCache.Get(subject) + if !ok { + err = c.restService.HandleRequest(internal.NewRequest("GET", internal.Versions, nil, url.PathEscape(subject), "latest"), &result) + if err == nil { + c.latestToSchemaCache.Put(subject, &result) + } + } else { + result = *metadataValue.(*SchemaMetadata) + } + c.latestToSchemaCacheLock.Unlock() + return result, err +} + +// GetSchemaMetadata fetches the requested subject schema identified by version +// Returns SchemaMetadata object +func (c *client) GetSchemaMetadata(subject string, version int) (result SchemaMetadata, err error) { + return c.GetSchemaMetadataIncludeDeleted(subject, version, false) +} + +// GetSchemaMetadataIncludeDeleted fetches the requested subject schema identified by version and deleted flag +// Returns SchemaMetadata object
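+// For example (hypothetical subject and version), fetching version 3 of "orders-value" even if that version was soft-deleted: +// md, err := client.GetSchemaMetadataIncludeDeleted("orders-value", 3, true)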
+func (c *client) GetSchemaMetadataIncludeDeleted(subject string, version int, deleted bool) (result SchemaMetadata, err error) { + cacheKey := subjectVersion{ + subject: subject, + version: version, + deleted: deleted, + } + c.versionToSchemaCacheLock.RLock() + metadataValue, ok := c.versionToSchemaCache.Get(cacheKey) + c.versionToSchemaCacheLock.RUnlock() + if ok { + return *metadataValue.(*SchemaMetadata), nil + } + + c.versionToSchemaCacheLock.Lock() + // another goroutine could have already put it in cache + metadataValue, ok = c.versionToSchemaCache.Get(cacheKey) + if !ok { + err = c.restService.HandleRequest(internal.NewRequest("GET", internal.VersionsIncludeDeleted, nil, url.PathEscape(subject), version, deleted), &result) + if err == nil { + c.versionToSchemaCache.Put(cacheKey, &result) + } + } else { + result = *metadataValue.(*SchemaMetadata) + } + c.versionToSchemaCacheLock.Unlock() + return result, err +} + +// GetLatestWithMetadata fetches the latest subject schema with the given metadata +// Returns SchemaMetadata object +func (c *client) GetLatestWithMetadata(subject string, metadata map[string]string, deleted bool) (result SchemaMetadata, err error) { + b, _ := json.Marshal(metadata) + metadataStr := string(b) + cacheKey := subjectMetadata{ + subject: subject, + metadata: metadataStr, + deleted: deleted, + } + c.metadataToSchemaCacheLock.RLock() + metadataValue, ok := c.metadataToSchemaCache.Get(cacheKey) + c.metadataToSchemaCacheLock.RUnlock() + if ok { + return *metadataValue.(*SchemaMetadata), nil + } + + c.metadataToSchemaCacheLock.Lock() + // another goroutine could have already put it in cache + metadataValue, ok = c.metadataToSchemaCache.Get(cacheKey) + sb := strings.Builder{} + for key, value := range metadata { + _, _ = sb.WriteString("&key=") + _, _ = sb.WriteString(key) + _, _ = sb.WriteString("&value=") + _, _ = sb.WriteString(value) + } + if !ok { + err = c.restService.HandleRequest(internal.NewRequest("GET", internal.LatestWithMetadata, nil, url.PathEscape(subject), deleted, sb.String()), &result) + if err == nil { + c.metadataToSchemaCache.Put(cacheKey, &result) + } + } else { + result = *metadataValue.(*SchemaMetadata) + } + c.metadataToSchemaCacheLock.Unlock() + return result, err +} + +// GetAllVersions fetches a list of all version numbers associated with the provided subject registration +// Returns integer slice on success +func (c *client) GetAllVersions(subject string) (results []int, err error) { + var result []int + err = c.restService.HandleRequest(internal.NewRequest("GET", internal.Version, nil, url.PathEscape(subject)), &result) + + return result, err +} + +// GetVersion finds the subject version associated with the provided schema +// Returns the integer version number +func (c *client) GetVersion(subject string, schema SchemaInfo, normalize bool) (version int, err error) { + schemaJSON, err := schema.MarshalJSON() + if err != nil { + return -1, err + } + cacheKey := subjectJSON{ + subject: subject, + json: string(schemaJSON), + } + c.schemaToVersionCacheLock.RLock() + versionValue, ok := c.schemaToVersionCache.Get(cacheKey) + c.schemaToVersionCacheLock.RUnlock() + if ok { + return versionValue.(int), nil + } + + metadata := SchemaMetadata{ + SchemaInfo: schema, + } + c.schemaToVersionCacheLock.Lock() + // another goroutine could have already put it in cache + versionValue, ok = c.schemaToVersionCache.Get(cacheKey) + if !ok { + err = c.restService.HandleRequest(internal.NewRequest("POST", internal.SubjectsNormalize, &metadata,
url.PathEscape(subject), normalize), &metadata) + if err == nil { + c.schemaToVersionCache.Put(cacheKey, metadata.Version) + } else { + metadata.Version = -1 + } + } else { + metadata.Version = versionValue.(int) + } + c.schemaToVersionCacheLock.Unlock() + return metadata.Version, err +} + +// Fetch all Subjects registered with the schema Registry +// Returns a string slice containing all registered subjects +func (c *client) GetAllSubjects() ([]string, error) { + var result []string + err := c.restService.HandleRequest(internal.NewRequest("GET", internal.Subject, nil), &result) + + return result, err +} + +// Deletes provided Subject from registry +// Returns integer slice of versions removed by delete +func (c *client) DeleteSubject(subject string, permanent bool) (deleted []int, err error) { + c.infoToSchemaCacheLock.Lock() + for keyValue := range c.infoToSchemaCache.ToMap() { + key := keyValue.(subjectJSON) + if key.subject == subject { + c.infoToSchemaCache.Delete(key) + } + } + c.infoToSchemaCacheLock.Unlock() + c.schemaToVersionCacheLock.Lock() + for keyValue := range c.schemaToVersionCache.ToMap() { + key := keyValue.(subjectJSON) + if key.subject == subject { + c.schemaToVersionCache.Delete(key) + } + } + c.schemaToVersionCacheLock.Unlock() + c.versionToSchemaCacheLock.Lock() + for keyValue := range c.versionToSchemaCache.ToMap() { + key := keyValue.(subjectVersion) + if key.subject == subject { + c.versionToSchemaCache.Delete(key) + } + } + c.versionToSchemaCacheLock.Unlock() + c.idToSchemaInfoCacheLock.Lock() + for keyValue := range c.idToSchemaInfoCache.ToMap() { + key := keyValue.(subjectID) + if key.subject == subject { + c.idToSchemaInfoCache.Delete(key) + } + } + c.idToSchemaInfoCacheLock.Unlock() + var result []int + err = c.restService.HandleRequest(internal.NewRequest("DELETE", internal.SubjectsDelete, nil, url.PathEscape(subject), permanent), &result) + return result, err +} + +// DeleteSubjectVersion removes the version identified by the version argument from the subject's registration +// Returns integer id for the deleted version +func (c *client) DeleteSubjectVersion(subject string, version int, permanent bool) (deleted int, err error) { + c.schemaToVersionCacheLock.Lock() + for keyValue, value := range c.schemaToVersionCache.ToMap() { + key := keyValue.(subjectJSON) + if key.subject == subject && value == version { + c.schemaToVersionCache.Delete(key) + schemaJSON := key.json + cacheKeySchema := subjectJSON{ + subject: subject, + json: schemaJSON, + } + c.infoToSchemaCacheLock.Lock() + metadataValue, ok := c.infoToSchemaCache.Get(cacheKeySchema) + if ok { + c.infoToSchemaCache.Delete(cacheKeySchema) + } + c.infoToSchemaCacheLock.Unlock() + if ok { + md := *metadataValue.(*SchemaMetadata) + c.idToSchemaInfoCacheLock.Lock() + cacheKeyID := subjectID{ + subject: subject, + id: md.ID, + } + c.idToSchemaInfoCache.Delete(cacheKeyID) + c.idToSchemaInfoCacheLock.Unlock() + } + } + } + c.schemaToVersionCacheLock.Unlock() + c.versionToSchemaCacheLock.Lock() + cacheKey := subjectVersion{ + subject: subject, + version: version, + } + c.versionToSchemaCache.Delete(cacheKey) + c.versionToSchemaCacheLock.Unlock() + var result int + err = c.restService.HandleRequest(internal.NewRequest("DELETE", internal.VersionsDelete, nil, url.PathEscape(subject), version, permanent), &result) + return result, err + +} + +// Compatibility options +type Compatibility int + +const ( + _ = iota + // None is no compatibility + None + // Backward compatibility + Backward + // Forward compatibility + Forward + //
Full compatibility + Full + // BackwardTransitive compatibility + BackwardTransitive + // ForwardTransitive compatibility + ForwardTransitive + // FullTransitive compatibility + FullTransitive +) + +var compatibilityEnum = []string{ + "", + "NONE", + "BACKWARD", + "FORWARD", + "FULL", + "BACKWARD_TRANSITIVE", + "FORWARD_TRANSITIVE", + "FULL_TRANSITIVE", +} + +/* NOTE: GET uses compatibilityLevel, POST uses compatibility */ +type compatibilityLevel struct { + CompatibilityUpdate Compatibility `json:"compatibility,omitempty"` + Compatibility Compatibility `json:"compatibilityLevel,omitempty"` +} + +// MarshalJSON implements json.Marshaler +func (c *Compatibility) MarshalJSON() ([]byte, error) { + return json.Marshal(c.String()) +} + +// UnmarshalJSON implements json.Unmarshaler +func (c *Compatibility) UnmarshalJSON(b []byte) error { + val := string(b[1 : len(b)-1]) + return c.ParseString(val) +} + +type compatibilityValue struct { + Compatible bool `json:"is_compatible,omitempty"` +} + +func (c *Compatibility) String() string { + return compatibilityEnum[*c] +} + +// ParseString returns a Compatibility for the given string +func (c *Compatibility) ParseString(val string) error { + for idx, elm := range compatibilityEnum { + if elm == val { + *c = Compatibility(idx) + return nil + } + } + + return fmt.Errorf("failed to unmarshal Compatibility") +} + +// TestSubjectCompatibility verifies schema against all schemas in the subject +// Returns true if the schema is compatible, false otherwise +func (c *client) TestSubjectCompatibility(subject string, schema SchemaInfo) (ok bool, err error) { + var result compatibilityValue + candidate := SchemaMetadata{ + SchemaInfo: schema, + } + + err = c.restService.HandleRequest(internal.NewRequest("POST", internal.SubjectCompatibility, &candidate, url.PathEscape(subject)), &result) + + return result.Compatible, err +} + +// TestCompatibility verifies schema against the subject's compatibility policy +// Returns true if the schema is compatible, false otherwise +func (c *client) TestCompatibility(subject string, version int, schema SchemaInfo) (ok bool, err error) { + var result compatibilityValue + candidate := SchemaMetadata{ + SchemaInfo: schema, + } + + err = c.restService.HandleRequest(internal.NewRequest("POST", internal.Compatibility, &candidate, url.PathEscape(subject), version), &result) + + return result.Compatible, err +} + +// Fetch compatibility level currently configured for provided subject +// Returns compatibility level string upon success +func (c *client) GetCompatibility(subject string) (compatibility Compatibility, err error) { + var result compatibilityLevel + err = c.restService.HandleRequest(internal.NewRequest("GET", internal.SubjectConfig, nil, url.PathEscape(subject)), &result) + + return result.Compatibility, err +} + +// UpdateCompatibility updates subject's compatibility level +// Returns new compatibility level string upon success +func (c *client) UpdateCompatibility(subject string, update Compatibility) (compatibility Compatibility, err error) { + result := compatibilityLevel{ + CompatibilityUpdate: update, + } + err = c.restService.HandleRequest(internal.NewRequest("PUT", internal.SubjectConfig, &result, url.PathEscape(subject)), &result) + + return result.CompatibilityUpdate, err +} + +// GetDefaultCompatibility fetches the global(default) compatibility level +// Returns global(default) compatibility level +func (c *client) GetDefaultCompatibility() (compatibility Compatibility, err error) { + var result compatibilityLevel 
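+ // Note: the registry reports the level under the "compatibilityLevel" JSON key on GET, which the compatibilityLevel struct maps to its Compatibility field (see the NOTE on that type).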
+ err = c.restService.HandleRequest(internal.NewRequest("GET", internal.Config, nil), &result) + + return result.Compatibility, err +} + +// UpdateDefaultCompatibility updates the global(default) compatibility level +// Returns new compatibility level string +func (c *client) UpdateDefaultCompatibility(update Compatibility) (compatibility Compatibility, err error) { + result := compatibilityLevel{ + CompatibilityUpdate: update, + } + err = c.restService.HandleRequest(internal.NewRequest("PUT", internal.Config, &result), &result) + + return result.CompatibilityUpdate, err +} + +// Fetch config currently configured for provided subject +// Returns config upon success +func (c *client) GetConfig(subject string, defaultToGlobal bool) (result ServerConfig, err error) { + err = c.restService.HandleRequest(internal.NewRequest("GET", internal.SubjectConfigDefault, nil, url.PathEscape(subject), defaultToGlobal), &result) + + return result, err +} + +// UpdateConfig updates subject's config +// Returns new config string upon success +func (c *client) UpdateConfig(subject string, update ServerConfig) (result ServerConfig, err error) { + err = c.restService.HandleRequest(internal.NewRequest("PUT", internal.SubjectConfig, &update, url.PathEscape(subject)), &result) + + return result, err +} + +// GetDefaultConfig fetches the global(default) config +// Returns global(default) config +func (c *client) GetDefaultConfig() (result ServerConfig, err error) { + err = c.restService.HandleRequest(internal.NewRequest("GET", internal.Config, nil), &result) + + return result, err +} + +// UpdateDefaultConfig updates the global(default) config +// Returns new config string +func (c *client) UpdateDefaultConfig(update ServerConfig) (result ServerConfig, err error) { + err = c.restService.HandleRequest(internal.NewRequest("PUT", internal.Config, &update), &result) + + return result, err +} + +// ClearLatestCaches clears caches of latest versions +func (c *client) ClearLatestCaches() error { + c.latestToSchemaCacheLock.Lock() + c.latestToSchemaCache.Clear() + c.latestToSchemaCacheLock.Unlock() + c.metadataToSchemaCacheLock.Lock() + c.metadataToSchemaCache.Clear() + c.metadataToSchemaCacheLock.Unlock() + return nil +} + +// ClearCaches clears all caches +func (c *client) ClearCaches() error { + c.infoToSchemaCacheLock.Lock() + c.infoToSchemaCache.Clear() + c.infoToSchemaCacheLock.Unlock() + c.idToSchemaInfoCacheLock.Lock() + c.idToSchemaInfoCache.Clear() + c.idToSchemaInfoCacheLock.Unlock() + c.schemaToVersionCacheLock.Lock() + c.schemaToVersionCache.Clear() + c.schemaToVersionCacheLock.Unlock() + c.versionToSchemaCacheLock.Lock() + c.versionToSchemaCache.Clear() + c.versionToSchemaCacheLock.Unlock() + c.latestToSchemaCacheLock.Lock() + c.latestToSchemaCache.Clear() + c.latestToSchemaCacheLock.Unlock() + c.metadataToSchemaCacheLock.Lock() + c.metadataToSchemaCache.Clear() + c.metadataToSchemaCacheLock.Unlock() + return nil +} + +// Close closes the client +func (c *client) Close() error { + c.ClearCaches() + return nil +} + +type evictor struct { + Interval time.Duration + stop chan bool +} + +func (e *evictor) Run(c cache.Cache) { + ticker := time.NewTicker(e.Interval) + for { + select { + case <-ticker.C: + c.Clear() + case <-e.stop: + ticker.Stop() + return + } + } +} + +func stopEvictor(c *client) { + c.evictor.stop <- true +} + +func runEvictor(c *client, ci time.Duration) { + e := &evictor{ + Interval: ci, + stop: make(chan bool), + } + c.evictor = e + go e.Run(c.latestToSchemaCache) +} diff --git
a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2/avro.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2/avro.go new file mode 100644 index 00000000..f58b72ec --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2/avro.go @@ -0,0 +1,482 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package avrov2 + +import ( + "encoding" + "errors" + "fmt" + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry" + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache" + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde" + "github.com/hamba/avro/v2" + "reflect" + "strings" + "sync" + "time" +) + +// Serializer represents a generic Avro serializer +type Serializer struct { + serde.BaseSerializer + *Serde +} + +// Deserializer represents a generic Avro deserializer +type Deserializer struct { + serde.BaseDeserializer + *Serde +} + +// Serde represents an Avro serde +type Serde struct { + resolver *avro.TypeResolver + schemaToTypeCache cache.Cache + schemaToTypeCacheLock sync.RWMutex +} + +var _ serde.Serializer = new(Serializer) +var _ serde.Deserializer = new(Deserializer) + +// NewSerializer creates an Avro serializer for generic objects +func NewSerializer(client schemaregistry.Client, serdeType serde.Type, conf *SerializerConfig) (*Serializer, error) { + schemaToTypeCache, err := cache.NewLRUCache(1000) + if err != nil { + return nil, err + } + ps := &Serde{ + resolver: avro.NewTypeResolver(), + schemaToTypeCache: schemaToTypeCache, + } + s := &Serializer{ + Serde: ps, + } + err = s.ConfigureSerializer(client, serdeType, &conf.SerializerConfig) + if err != nil { + return nil, err + } + fieldTransformer := func(ctx serde.RuleContext, fieldTransform serde.FieldTransform, msg interface{}) (interface{}, error) { + return s.FieldTransform(s.Client, ctx, fieldTransform, msg) + } + s.FieldTransformer = fieldTransformer + err = s.SetRuleRegistry(serde.GlobalRuleRegistry(), conf.RuleConfig) + if err != nil { + return nil, err + } + return s, nil +} + +// Serialize implements serialization of generic Avro data +func (s *Serializer) Serialize(topic string, msg interface{}) ([]byte, error) { + if msg == nil { + return nil, nil + } + var avroSchema avro.Schema + var info schemaregistry.SchemaInfo + var err error + // Don't derive the schema if it is being looked up in the following ways + if s.Conf.UseSchemaID == -1 && + !s.Conf.UseLatestVersion && + len(s.Conf.UseLatestWithMetadata) == 0 { + msgType := reflect.TypeOf(msg) + if msgType.Kind() != reflect.Pointer { + return nil, errors.New("input message must be a pointer") + } + avroSchema, err = StructToSchema(msgType.Elem()) + if err != nil { + return nil, err + } + info = schemaregistry.SchemaInfo{ + Schema: avroSchema.String(), + } + } + id, err := s.GetID(topic, msg, &info) + if err != nil { + return nil, err + } + avroSchema, _, err = 
s.toType(s.Client, info) + if err != nil { + return nil, err + } + subject, err := s.SubjectNameStrategy(topic, s.SerdeType, info) + if err != nil { + return nil, err + } + msg, err = s.ExecuteRules(subject, topic, schemaregistry.Write, nil, &info, msg) + if err != nil { + return nil, err + } + // Convert pointer to non-pointer + msg = reflect.ValueOf(msg).Elem().Interface() + msgBytes, err := avro.Marshal(avroSchema, msg) + if err != nil { + return nil, err + } + payload, err := s.WriteBytes(id, msgBytes) + if err != nil { + return nil, err + } + return payload, nil +} + +// NewDeserializer creates an Avro deserializer for generic objects +func NewDeserializer(client schemaregistry.Client, serdeType serde.Type, conf *DeserializerConfig) (*Deserializer, error) { + schemaToTypeCache, err := cache.NewLRUCache(1000) + if err != nil { + return nil, err + } + ps := &Serde{ + resolver: avro.NewTypeResolver(), + schemaToTypeCache: schemaToTypeCache, + } + s := &Deserializer{ + Serde: ps, + } + err = s.ConfigureDeserializer(client, serdeType, &conf.DeserializerConfig) + if err != nil { + return nil, err + } + fieldTransformer := func(ctx serde.RuleContext, fieldTransform serde.FieldTransform, msg interface{}) (interface{}, error) { + return s.FieldTransform(s.Client, ctx, fieldTransform, msg) + } + s.FieldTransformer = fieldTransformer + err = s.SetRuleRegistry(serde.GlobalRuleRegistry(), conf.RuleConfig) + if err != nil { + return nil, err + } + return s, nil +} + +// Deserialize implements deserialization of generic Avro data +func (s *Deserializer) Deserialize(topic string, payload []byte) (interface{}, error) { + return s.deserialize(topic, payload, nil) +} + +// DeserializeInto implements deserialization of generic Avro data to the given object +func (s *Deserializer) DeserializeInto(topic string, payload []byte, msg interface{}) error { + _, err := s.deserialize(topic, payload, msg) + return err +} + +func (s *Deserializer) deserialize(topic string, payload []byte, result interface{}) (interface{}, error) { + if len(payload) == 0 { + return nil, nil + } + info, err := s.GetSchema(topic, payload) + if err != nil { + return nil, err + } + subject, err := s.SubjectNameStrategy(topic, s.SerdeType, info) + if err != nil { + return nil, err + } + readerMeta, err := s.GetReaderSchema(subject) + if err != nil { + return nil, err + } + var migrations []serde.Migration + if readerMeta != nil { + migrations, err = s.GetMigrations(subject, topic, &info, readerMeta, payload) + if err != nil { + return nil, err + } + } + writer, name, err := s.toType(s.Client, info) + if err != nil { + return nil, err + } + var msg interface{} + if len(migrations) > 0 { + err = avro.Unmarshal(writer, payload[5:], &msg) + if err != nil { + return nil, err + } + msg, err = s.ExecuteMigrations(migrations, subject, topic, msg) + if err != nil { + return nil, err + } + var reader avro.Schema + reader, name, err = s.toType(s.Client, readerMeta.SchemaInfo) + if err != nil { + return nil, err + } + var bytes []byte + bytes, err = avro.Marshal(reader, msg) + if err != nil { + return nil, err + } + if result == nil { + msg, err = s.MessageFactory(subject, name) + if err != nil { + return nil, err + } + } else { + msg = result + } + err = avro.Unmarshal(reader, bytes, msg) + if err != nil { + return nil, err + } + } else { + if result == nil { + msg, err = s.MessageFactory(subject, name) + if err != nil { + return nil, err + } + } else { + msg = result + } + if readerMeta != nil { + var reader avro.Schema + reader, name, err = 
s.toType(s.Client, readerMeta.SchemaInfo) + if err != nil { + return nil, err + } + if reader.CacheFingerprint() != writer.CacheFingerprint() { + // reader and writer are different, perform schema resolution + sc := avro.NewSchemaCompatibility() + reader, err = sc.Resolve(reader, writer) + if err != nil { + return nil, err + } + } + err = avro.Unmarshal(reader, payload[5:], msg) + if err != nil { + return nil, err + } + } else { + err = avro.Unmarshal(writer, payload[5:], msg) + if err != nil { + return nil, err + } + } + } + var target *schemaregistry.SchemaInfo + if readerMeta != nil { + target = &readerMeta.SchemaInfo + } else { + target = &info + } + msg, err = s.ExecuteRules(subject, topic, schemaregistry.Read, nil, target, msg) + if err != nil { + return nil, err + } + return msg, nil +} + +// RegisterType registers a type with the Avro Serde +func (s *Serde) RegisterType(name string, msgType interface{}) { + s.resolver.Register(name, msgType) +} + +// RegisterTypeFromMessageFactory registers a type with the Avro Serde using a message factory +func (s *Serde) RegisterTypeFromMessageFactory(name string, messageFactory serde.MessageFactory) error { + if messageFactory == nil { + return errors.New("MessageFactory is nil") + } + typ, err := messageFactory("", name) + if err != nil { + return err + } + v := reflect.ValueOf(typ) + s.RegisterType(name, v.Elem().Interface()) + return nil +} + +// FieldTransform transforms a field value using the given field transform +func (s *Serde) FieldTransform(client schemaregistry.Client, ctx serde.RuleContext, fieldTransform serde.FieldTransform, msg interface{}) (interface{}, error) { + schema, _, err := s.toType(client, *ctx.Target) + if err != nil { + return nil, err + } + val := reflect.ValueOf(msg) + newVal, err := transform(ctx, s.resolver, schema, &val, fieldTransform) + if err != nil { + return nil, err + } + return newVal.Interface(), nil +} + +func (s *Serde) toType(client schemaregistry.Client, schema schemaregistry.SchemaInfo) (avro.Schema, string, error) { + s.schemaToTypeCacheLock.RLock() + value, ok := s.schemaToTypeCache.Get(schema.Schema) + s.schemaToTypeCacheLock.RUnlock() + if ok { + avroType := value.(avro.Schema) + return avroType, name(avroType), nil + } + avroType, err := resolveAvroReferences(client, schema) + if err != nil { + return nil, "", err + } + s.schemaToTypeCacheLock.Lock() + s.schemaToTypeCache.Put(schema.Schema, avroType) + s.schemaToTypeCacheLock.Unlock() + return avroType, name(avroType), nil +} + +func name(avroType avro.Schema) string { + named, ok := avroType.(avro.NamedSchema) + if ok { + return named.FullName() + } + return "" +} + +func resolveAvroReferences(c schemaregistry.Client, schema schemaregistry.SchemaInfo) (avro.Schema, error) { + for _, ref := range schema.References { + metadata, err := c.GetSchemaMetadataIncludeDeleted(ref.Subject, ref.Version, true) + if err != nil { + return nil, err + } + info := metadata.SchemaInfo + _, err = resolveAvroReferences(c, info) + if err != nil { + return nil, err + } + + } + sType, err := avro.Parse(schema.Schema) + if err != nil { + return nil, err + } + return sType, nil +} + +// StructToSchema generates an Avro schema from the given struct type +func StructToSchema(t reflect.Type, tags ...reflect.StructTag) (avro.Schema, error) { + var schFields []*avro.Field + switch t.Kind() { + case reflect.Struct: + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return avro.NewPrimitiveSchema(avro.Long, avro.NewPrimitiveLogicalSchema(avro.TimestampMillis)), nil + } + 
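+ // Types implementing encoding.TextMarshaler (net.IP, for example) are mapped below to Avro strings carrying a "subtype" property derived from the Go type name.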
if t.Implements(reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()) { + subtype := strings.Split(t.String(), ".") + return avro.NewPrimitiveSchema(avro.String, nil, avro.WithProps(map[string]any{"subtype": strings.ToLower(subtype[len(subtype)-1])})), nil + } + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + s, err := StructToSchema(f.Type, f.Tag) + if err != nil { + return nil, fmt.Errorf("StructToSchema: %w", err) + } + fName := f.Tag.Get("avro") + if len(fName) == 0 { + fName = f.Name + } else if fName == "-" { + continue + } + defaultVal := avroDefaultField(s) + var schField *avro.Field + if defaultVal != nil { + schField, err = avro.NewField(fName, s, avro.WithDefault(defaultVal)) + } else { + schField, err = avro.NewField(fName, s) + } + if err != nil { + return nil, fmt.Errorf("avro.NewField: %w", err) + } + schFields = append(schFields, schField) + } + name := t.Name() + if len(name) == 0 { + name = "anonymous" + } + return avro.NewRecordSchema(name, "", schFields) + case reflect.Map: + s, err := StructToSchema(t.Elem(), tags...) + if err != nil { + return nil, fmt.Errorf("StructToSchema: %w", err) + } + return avro.NewMapSchema(s), nil + case reflect.Slice, reflect.Array: + if t.Elem().Kind() == reflect.Uint8 { + if strings.Contains(strings.ToLower(t.Elem().String()), "decimal") { + return avro.NewPrimitiveSchema(avro.Bytes, avro.NewPrimitiveLogicalSchema(avro.Decimal)), nil + } + if strings.Contains(strings.ToLower(t.Elem().String()), "uuid") { + return avro.NewPrimitiveSchema(avro.String, avro.NewPrimitiveLogicalSchema(avro.UUID)), nil + } + return avro.NewPrimitiveSchema(avro.Bytes, nil), nil + } + s, err := StructToSchema(t.Elem(), tags...) + if err != nil { + return nil, fmt.Errorf("StructToSchema: %w", err) + } + return avro.NewArraySchema(s), nil + case reflect.Pointer: + n := avro.NewPrimitiveSchema(avro.Null, nil) + s, err := StructToSchema(t.Elem(), tags...) 
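+ // A pointer field becomes a ["null", T] union, i.e. an optional Avro field.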
+ if err != nil { + return nil, fmt.Errorf("StructToSchema: %w", err) + } + union, err := avro.NewUnionSchema([]avro.Schema{n, s}) + if err != nil { + return nil, fmt.Errorf("avro.NewUnionSchema: %v, type: %s", err, s.String()) + } + return union, nil + case reflect.Bool: + return avro.NewPrimitiveSchema(avro.Boolean, nil), nil + case reflect.Uint8, reflect.Int8: + return avro.NewPrimitiveSchema(avro.Bytes, nil), nil + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint16, reflect.Uint32: + if strings.Contains(strings.ToLower(t.String()), "date") { + return avro.NewPrimitiveSchema(avro.Int, avro.NewPrimitiveLogicalSchema(avro.Date)), nil + } + if strings.Contains(strings.ToLower(t.String()), "time") { + return avro.NewPrimitiveSchema(avro.Int, avro.NewPrimitiveLogicalSchema(avro.TimeMillis)), nil + } + return avro.NewPrimitiveSchema(avro.Int, nil), nil + case reflect.Int64, reflect.Uint64: + if strings.Contains(strings.ToLower(t.String()), "duration") { + return avro.NewPrimitiveSchema(avro.Fixed, avro.NewPrimitiveLogicalSchema(avro.Duration)), nil + } + return avro.NewPrimitiveSchema(avro.Long, nil), nil + case reflect.Float32: + return avro.NewPrimitiveSchema(avro.Float, nil), nil + case reflect.Float64: + return avro.NewPrimitiveSchema(avro.Double, nil), nil + case reflect.String: + return avro.NewPrimitiveSchema(avro.String, nil), nil + default: + return nil, fmt.Errorf("unknown type %s", t.Kind().String()) + } +} + +func avroDefaultField(s avro.Schema) any { + switch s.Type() { + case avro.String, avro.Bytes, avro.Enum, avro.Fixed: + return "" + case avro.Boolean: + return false + case avro.Int: + return int(0) + case avro.Long: + return int64(0) + case avro.Float: + return float32(0.0) + case avro.Double: + return float64(0.0) + case avro.Map: + return make(map[string]any) + case avro.Array: + return []any{} + default: + return nil + } +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2/avro_util.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2/avro_util.go new file mode 100644 index 00000000..5075b83a --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2/avro_util.go @@ -0,0 +1,278 @@ +/** + * Copyright 2024 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package avrov2 + +import ( + "fmt" + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde" + "github.com/hamba/avro/v2" + "github.com/modern-go/reflect2" + "reflect" + "strings" +) + +func transform(ctx serde.RuleContext, resolver *avro.TypeResolver, schema avro.Schema, msg *reflect.Value, + fieldTransform serde.FieldTransform) (*reflect.Value, error) { + if msg == nil || (msg.Kind() == reflect.Pointer && msg.IsNil()) || schema == nil { + return msg, nil + } + fieldCtx := ctx.CurrentField() + if fieldCtx != nil { + fieldCtx.Type = getType(schema) + } + switch schema.(type) { + case *avro.UnionSchema: + val := deref(msg) + subschema, err := resolveUnion(resolver, schema, val) + if err != nil { + return nil, err + } + return transform(ctx, resolver, subschema, msg, fieldTransform) + case *avro.ArraySchema: + val := deref(msg) + if val.Kind() != reflect.Slice { + return msg, nil + } + subschema := schema.(*avro.ArraySchema).Items() + for i := 0; i < val.Len(); i++ { + item := val.Index(i) + newVal, err := transform(ctx, resolver, subschema, &item, fieldTransform) + if err != nil { + return nil, err + } + item.Set(*newVal) + } + return msg, nil + case *avro.MapSchema: + val := deref(msg) + if val.Kind() != reflect.Map { + return msg, nil + } + subschema := schema.(*avro.MapSchema).Values() + iter := val.MapRange() + for iter.Next() { + k := iter.Key() + v := iter.Value() + newVal, err := transform(ctx, resolver, subschema, &v, fieldTransform) + if err != nil { + return nil, err + } + val.SetMapIndex(k, *newVal) + } + return msg, nil + case *avro.RecordSchema: + val := deref(msg) + recordSchema := schema.(*avro.RecordSchema) + if val.Kind() == reflect.Struct { + fieldByNames := fieldByNames(val) + for _, avroField := range recordSchema.Fields() { + structField, ok := fieldByNames[avroField.Name()] + if !ok { + return nil, fmt.Errorf("avro: missing field %s", avroField.Name()) + } + err := transformField(ctx, resolver, recordSchema, avroField, structField, val, fieldTransform) + if err != nil { + return nil, err + } + } + return msg, nil + } else if val.Kind() == reflect.Map { + for _, avroField := range recordSchema.Fields() { + mapField := val.MapIndex(reflect.ValueOf(avroField.Name())) + err := transformField(ctx, resolver, recordSchema, avroField, &mapField, val, fieldTransform) + if err != nil { + return nil, err + } + } + return msg, nil + } else { + return nil, fmt.Errorf("message of kind %s is not a struct or map", val.Kind()) + } + default: + if fieldCtx != nil { + ruleTags := ctx.Rule.Tags + if len(ruleTags) == 0 || !disjoint(ruleTags, fieldCtx.Tags) { + val := deref(msg) + newVal, err := fieldTransform.Transform(ctx, *fieldCtx, val.Interface()) + if err != nil { + return nil, err + } + result := reflect.ValueOf(newVal) + return &result, nil + } + } + return msg, nil + } +} + +func fieldByNames(value *reflect.Value) map[string]*reflect.Value { + fieldByNames := make(map[string]*reflect.Value, value.NumField()) + for i := 0; i < value.NumField(); i++ { + field := value.Field(i) + structField := value.Type().Field(i) + fieldName := structField.Name + if tag, ok := structField.Tag.Lookup("avro"); ok { + fieldName = tag + } + fieldByNames[fieldName] = &field + } + return fieldByNames +} + +func transformField(ctx serde.RuleContext, resolver *avro.TypeResolver, recordSchema *avro.RecordSchema, avroField *avro.Field, + structField *reflect.Value, val *reflect.Value, fieldTransform serde.FieldTransform) error { + fullName := recordSchema.FullName() + "." 
+ avroField.Name() + defer ctx.LeaveField() + ctx.EnterField(val.Interface(), fullName, avroField.Name(), getType(avroField.Type()), getInlineTags(avroField)) + newVal, err := transform(ctx, resolver, avroField.Type(), structField, fieldTransform) + if err != nil { + return err + } + if ctx.Rule.Kind == "CONDITION" { + newBool := deref(newVal) + if newBool.Kind() == reflect.Bool && !newBool.Bool() { + return serde.RuleConditionErr{ + Rule: ctx.Rule, + } + } + } else { + if val.Kind() == reflect.Struct { + err = setField(structField, newVal) + if err != nil { + return err + } + } else { + val.SetMapIndex(reflect.ValueOf(avroField.Name()), *newVal) + } + } + return nil +} + +func getType(schema avro.Schema) serde.FieldType { + switch schema.Type() { + case avro.Record: + return serde.TypeRecord + case avro.Enum: + return serde.TypeEnum + case avro.Array: + return serde.TypeArray + case avro.Map: + return serde.TypeMap + case avro.Union: + return serde.TypeCombined + case avro.Fixed: + return serde.TypeFixed + case avro.String: + return serde.TypeString + case avro.Bytes: + return serde.TypeBytes + case avro.Int: + return serde.TypeInt + case avro.Long: + return serde.TypeLong + case avro.Float: + return serde.TypeFloat + case avro.Double: + return serde.TypeDouble + case avro.Boolean: + return serde.TypeBoolean + case avro.Null: + return serde.TypeNull + default: + return serde.TypeNull + } +} + +func getInlineTags(field *avro.Field) []string { + prop := field.Prop("confluent:tags") + val, ok := prop.([]interface{}) + if ok { + tags := make([]string, len(val)) + for i, v := range val { + tags[i] = fmt.Sprint(v) + } + return tags + } + return []string{} +} + +func disjoint(slice1 []string, map1 map[string]bool) bool { + for _, v := range slice1 { + if map1[v] { + return false + } + } + return true +} + +func getField(msg *reflect.Value, name string) (*reflect.Value, error) { + if msg.Kind() != reflect.Struct { + return nil, fmt.Errorf("message is not a struct") + } + fieldVal := msg.FieldByName(name) + return &fieldVal, nil +} + +// See https://stackoverflow.com/questions/64138199/how-to-set-a-struct-member-that-is-a-pointer-to-an-arbitrary-value-using-reflect +func setField(field *reflect.Value, value *reflect.Value) error { + if !field.CanSet() { + return fmt.Errorf("cannot assign to the given field") + } + if field.Kind() == reflect.Pointer && value.Kind() != reflect.Pointer { + x := reflect.New(field.Type().Elem()) + x.Elem().Set(*value) + field.Set(x) + } else { + field.Set(*value) + } + return nil +} + +func resolveUnion(resolver *avro.TypeResolver, schema avro.Schema, msg *reflect.Value) (avro.Schema, error) { + union := schema.(*avro.UnionSchema) + var names []string + var err error + if msg.IsValid() && msg.CanInterface() { + val := msg.Interface() + typ := reflect2.TypeOf(val) + names, err = resolver.Name(typ) + if err != nil { + return nil, err + } + } else { + names = []string{"null"} + } + for _, name := range names { + if idx := strings.Index(name, ":"); idx > 0 { + name = name[:idx] + } + + schema, _ = union.Types().Get(name) + if schema != nil { + return schema, nil + } + } + return nil, fmt.Errorf("avro: unknown union type %s", names[0]) +} + +func deref(val *reflect.Value) *reflect.Value { + if val.Kind() == reflect.Pointer { + v := val.Elem() + return &v + } + return val +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2/config.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2/config.go new 
file mode 100644 index 00000000..3fc220dd --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2/config.go @@ -0,0 +1,49 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package avrov2 + +import ( + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde" +) + +// SerializerConfig is used to pass multiple configuration options to the serializers. +type SerializerConfig struct { + serde.SerializerConfig +} + +// NewSerializerConfig returns a new configuration instance with sane defaults. +func NewSerializerConfig() *SerializerConfig { + c := &SerializerConfig{ + SerializerConfig: *serde.NewSerializerConfig(), + } + + return c +} + +// DeserializerConfig is used to pass multiple configuration options to the deserializers. +type DeserializerConfig struct { + serde.DeserializerConfig +} + +// NewDeserializerConfig returns a new configuration instance with sane defaults. +func NewDeserializerConfig() *DeserializerConfig { + c := &DeserializerConfig{ + DeserializerConfig: *serde.NewDeserializerConfig(), + } + + return c +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/config.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/config.go new file mode 100644 index 00000000..234ecda6 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/config.go @@ -0,0 +1,62 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package serde + +// SerializerConfig is used to pass multiple configuration options to the serializers. +type SerializerConfig struct { + // AutoRegisterSchemas determines whether to automatically register schemas during serialization + AutoRegisterSchemas bool + // UseSchemaID specifies a schema ID to use during serialization + UseSchemaID int + // UseLatestVersion specifies whether to use the latest schema version during serialization + UseLatestVersion bool + // UseLatestWithMetadata specifies whether to use the latest schema with metadata during serialization + UseLatestWithMetadata map[string]string + // NormalizeSchemas determines whether to normalize schemas during serialization + NormalizeSchemas bool + // RuleConfig specifies configuration options to the rules + RuleConfig map[string]string +} + +// NewSerializerConfig returns a new configuration instance with sane defaults. 
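+// By default, schemas are auto-registered, no explicit schema ID is pinned (UseSchemaID = -1), and latest-version lookup and normalization are disabled.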
+func NewSerializerConfig() *SerializerConfig { + c := &SerializerConfig{} + + c.AutoRegisterSchemas = true + c.UseSchemaID = -1 + c.UseLatestVersion = false + c.NormalizeSchemas = false + + return c +} + +// DeserializerConfig is used to pass multiple configuration options to the deserializers. +type DeserializerConfig struct { + // UseLatestVersion specifies whether to use the latest schema version during deserialization + UseLatestVersion bool + // UseLatestWithMetadata specifies whether to use the latest schema with metadata during deserialization + UseLatestWithMetadata map[string]string + // RuleConfig specifies configuration options to the rules + RuleConfig map[string]string +} + +// NewDeserializerConfig returns a new configuration instance with sane defaults. +func NewDeserializerConfig() *DeserializerConfig { + c := &DeserializerConfig{} + + return c +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/rule_registry.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/rule_registry.go new file mode 100644 index 00000000..8d374da8 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/rule_registry.go @@ -0,0 +1,117 @@ +/** + * Copyright 2024 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package serde + +import ( + "sync" +) + +var ( + globalInstance = RuleRegistry{ + ruleExecutors: make(map[string]RuleExecutor), + ruleActions: make(map[string]RuleAction), + } +) + +// RuleRegistry is used to store all registered rule executors and actions. +type RuleRegistry struct { + ruleExecutorsMu sync.RWMutex + ruleExecutors map[string]RuleExecutor + ruleActionsMu sync.RWMutex + ruleActions map[string]RuleAction +} + +// RegisterExecutor is used to register a new rule executor. +func (r *RuleRegistry) RegisterExecutor(ruleExecutor RuleExecutor) { + r.ruleExecutorsMu.Lock() + defer r.ruleExecutorsMu.Unlock() + r.ruleExecutors[ruleExecutor.Type()] = ruleExecutor +} + +// GetExecutor fetches a rule executor by a given name. +func (r *RuleRegistry) GetExecutor(name string) RuleExecutor { + r.ruleExecutorsMu.RLock() + defer r.ruleExecutorsMu.RUnlock() + return r.ruleExecutors[name] +} + +// GetExecutors fetches all rule executors +func (r *RuleRegistry) GetExecutors() []RuleExecutor { + r.ruleExecutorsMu.RLock() + defer r.ruleExecutorsMu.RUnlock() + var result []RuleExecutor + for _, v := range r.ruleExecutors { + result = append(result, v) + } + return result +} + +// RegisterAction is used to register a new global rule action. +func (r *RuleRegistry) RegisterAction(ruleAction RuleAction) { + r.ruleActionsMu.Lock() + defer r.ruleActionsMu.Unlock() + r.ruleActions[ruleAction.Type()] = ruleAction +} + +// GetAction fetches a rule action by a given name.
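+// It returns nil if no action has been registered under the given name.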
+func (r *RuleRegistry) GetAction(name string) RuleAction { + r.ruleActionsMu.RLock() + defer r.ruleActionsMu.RUnlock() + return r.ruleActions[name] +} + +// GetActions fetches all rule actions +func (r *RuleRegistry) GetActions() []RuleAction { + r.ruleActionsMu.RLock() + defer r.ruleActionsMu.RUnlock() + var result []RuleAction + for _, v := range r.ruleActions { + result = append(result, v) + } + return result +} + +// Clear clears all registered rules +func (r *RuleRegistry) Clear() { + r.ruleActionsMu.Lock() + defer r.ruleActionsMu.Unlock() + for k, v := range r.ruleActions { + _ = v.Close() + delete(r.ruleActions, k) + } + r.ruleExecutorsMu.Lock() + defer r.ruleExecutorsMu.Unlock() + for k, v := range r.ruleExecutors { + _ = v.Close() + delete(r.ruleExecutors, k) + } +} + +// GlobalRuleRegistry returns the global rule registry. +func GlobalRuleRegistry() *RuleRegistry { + return &globalInstance +} + +// RegisterRuleExecutor is used to register a new global rule executor. +func RegisterRuleExecutor(ruleExecutor RuleExecutor) { + globalInstance.RegisterExecutor(ruleExecutor) +} + +// RegisterRuleAction is used to register a new global rule action. +func RegisterRuleAction(ruleAction RuleAction) { + globalInstance.RegisterAction(ruleAction) +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/serde.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/serde.go new file mode 100644 index 00000000..e5a3e5c4 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/serde.go @@ -0,0 +1,844 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package serde + +import ( + "bytes" + "encoding/binary" + "fmt" + "log" + "reflect" + "strings" + + "github.com/confluentinc/confluent-kafka-go/v2/schemaregistry" +) + +// Type represents the type of Serde +type Type = int + +const ( + // KeySerde denotes a key Serde + KeySerde = 1 + // ValueSerde denotes a value Serde + ValueSerde = 2 +) + +const ( + // EnableValidation enables validation + EnableValidation = true + // DisableValidation disables validation + DisableValidation = false +) + +// MagicByte is prepended to the serialized payload +const MagicByte byte = 0x0 + +// MessageFactory is a factory function, which should return a pointer to +// an instance into which we will unmarshal wire data. +// For Avro, the name will be the name of the Avro type if it has one. +// For JSON Schema, the name will be empty. +// For Protobuf, the name will be the name of the message type. +type MessageFactory func(subject string, name string) (interface{}, error) + +// Serializer represents a serializer +type Serializer interface { + ConfigureSerializer(client schemaregistry.Client, serdeType Type, + conf *SerializerConfig) error + // Serialize will serialize the given message, which should be a pointer. + // For example, in Protobuf, messages are always a pointer to a struct and never just a struct.
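+ // The returned bytes are framed for Schema Registry: the magic byte, a 4-byte big-endian schema ID, then the encoded payload (see WriteBytes).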
+ Serialize(topic string, msg interface{}) ([]byte, error) + Close() error +} + +// Deserializer represents a deserializer +type Deserializer interface { + ConfigureDeserializer(client schemaregistry.Client, serdeType Type, + conf *DeserializerConfig) error + // Deserialize will call the MessageFactory to create an object + // into which we will unmarshal data. + Deserialize(topic string, payload []byte) (interface{}, error) + // DeserializeInto will unmarshal data into the given object. + DeserializeInto(topic string, payload []byte, msg interface{}) error + Close() error +} + +// Serde is a common instance for both the serializers and deserializers +type Serde struct { + Client schemaregistry.Client + SerdeType Type + SubjectNameStrategy SubjectNameStrategyFunc + MessageFactory MessageFactory + FieldTransformer FieldTransformer + RuleRegistry *RuleRegistry +} + +// BaseSerializer represents basic serializer info +type BaseSerializer struct { + Serde + Conf *SerializerConfig +} + +// BaseDeserializer represents basic deserializer info +type BaseDeserializer struct { + Serde + Conf *DeserializerConfig +} + +// RuleContext represents a rule context +type RuleContext struct { + Source *schemaregistry.SchemaInfo + Target *schemaregistry.SchemaInfo + Subject string + Topic string + IsKey bool + RuleMode schemaregistry.RuleMode + Rule *schemaregistry.Rule + Index int + Rules []schemaregistry.Rule + FieldTransformer FieldTransformer + fieldContexts []FieldContext +} + +// GetParameter returns a parameter by name +func (r *RuleContext) GetParameter(name string) *string { + params := r.Rule.Params + value, ok := params[name] + if ok { + return &value + } + metadata := r.Target.Metadata + if metadata != nil { + value, ok = metadata.Properties[name] + if ok { + return &value + } + } + return nil +} + +// CurrentField returns the current field context +func (r *RuleContext) CurrentField() *FieldContext { + size := len(r.fieldContexts) + if size == 0 { + return nil + } + return &r.fieldContexts[size-1] +} + +// EnterField enters a field context +func (r *RuleContext) EnterField(containingMessage interface{}, fullName string, + name string, fieldType FieldType, tags []string) (FieldContext, bool) { + allTags := make(map[string]bool) + for _, v := range tags { + allTags[v] = true + } + for k, v := range r.GetTags(fullName) { + allTags[k] = v + } + fieldContext := FieldContext{ + ContainingMessage: containingMessage, + FullName: fullName, + Name: name, + Type: fieldType, + Tags: allTags, + } + r.fieldContexts = append(r.fieldContexts, fieldContext) + return fieldContext, true +} + +// GetTags returns tags for a full name +func (r *RuleContext) GetTags(fullName string) map[string]bool { + tags := make(map[string]bool) + metadata := r.Target.Metadata + if metadata != nil && metadata.Tags != nil { + for k, v := range metadata.Tags { + if match(fullName, k) { + for _, tag := range v { + tags[tag] = true + } + } + } + } + return tags +} + +// LeaveField leaves a field context +func (r *RuleContext) LeaveField() { + size := len(r.fieldContexts) - 1 + r.fieldContexts = r.fieldContexts[:size] +} + +// RuleBase represents a rule base +type RuleBase interface { + Configure(clientConfig *schemaregistry.Config, config map[string]string) error + Type() string + Close() error +} + +// RuleExecutor represents a rule executor +type RuleExecutor interface { + RuleBase + Transform(ctx RuleContext, msg interface{}) (interface{}, error) +} + +// FieldTransformer represents a field transformer +type FieldTransformer func(ctx 
RuleContext, fieldTransform FieldTransform, msg interface{}) (interface{}, error) + +// FieldTransform represents a field transform +type FieldTransform interface { + Transform(ctx RuleContext, fieldCtx FieldContext, fieldValue interface{}) (interface{}, error) +} + +// FieldRuleExecutor represents a field rule executor +type FieldRuleExecutor interface { + RuleExecutor + NewTransform(ctx RuleContext) (FieldTransform, error) +} + +// AbstractFieldRuleExecutor represents an abstract field rule executor +type AbstractFieldRuleExecutor struct { + FieldRuleExecutor +} + +// Transform transforms the message using the rule +func (a *AbstractFieldRuleExecutor) Transform(ctx RuleContext, msg interface{}) (interface{}, error) { + // TODO preserve source? + switch ctx.RuleMode { + case schemaregistry.Write, schemaregistry.Upgrade: + for i := 0; i < ctx.Index; i++ { + otherRule := ctx.Rules[i] + if areTransformsWithSameTag(*ctx.Rule, otherRule) { + // ignore this transform if an earlier one has the same tag + return msg, nil + } + } + case schemaregistry.Read, schemaregistry.Downgrade: + for i := ctx.Index + 1; i < len(ctx.Rules); i++ { + otherRule := ctx.Rules[i] + if areTransformsWithSameTag(*ctx.Rule, otherRule) { + // ignore this transform if a later one has the same tag + return msg, nil + } + } + } + + fieldTransform, err := a.NewTransform(ctx) + if err != nil { + return nil, err + } + // TODO preserve source? + return ctx.FieldTransformer(ctx, fieldTransform, msg) +} + +func areTransformsWithSameTag(rule1 schemaregistry.Rule, rule2 schemaregistry.Rule) bool { + return len(rule1.Tags) > 0 && rule1.Kind == "TRANSFORM" && rule1.Kind == rule2.Kind && rule1.Mode == rule2.Mode && + rule1.Type == rule2.Type && reflect.DeepEqual(rule1.Tags, rule2.Tags) +} + +// FieldContext represents a field context +type FieldContext struct { + ContainingMessage interface{} + FullName string + Name string + Type FieldType + Tags map[string]bool +} + +// FieldType represents the field type +type FieldType = int + +const ( + // TypeRecord represents a record + TypeRecord = 1 + // TypeEnum represents an enum + TypeEnum = 2 + // TypeArray represents an array + TypeArray = 3 + // TypeMap represents a map + TypeMap = 4 + // TypeCombined represents a combined + TypeCombined = 5 + // TypeFixed represents a fixed + TypeFixed = 6 + // TypeString represents a string + TypeString = 7 + // TypeBytes represents bytes + TypeBytes = 8 + // TypeInt represents an int + TypeInt = 9 + // TypeLong represents a long + TypeLong = 10 + // TypeFloat represents a float + TypeFloat = 11 + // TypeDouble represents a double + TypeDouble = 12 + // TypeBoolean represents a Boolean + TypeBoolean = 13 + // TypeNull represents a null + TypeNull = 14 +) + +// IsPrimitive returns true if the field is a primitive +func (f *FieldContext) IsPrimitive() bool { + t := f.Type + return t == TypeString || t == TypeBytes || t == TypeInt || t == TypeLong || + t == TypeFloat || t == TypeDouble || t == TypeBoolean || t == TypeNull +} + +// TypeName returns the type name +func (f *FieldContext) TypeName() string { + switch f.Type { + case TypeRecord: + return "RECORD" + case TypeEnum: + return "ENUM" + case TypeArray: + return "ARRAY" + case TypeMap: + return "MAP" + case TypeCombined: + return "COMBINED" + case TypeFixed: + return "FIXED" + case TypeString: + return "STRING" + case TypeBytes: + return "BYTES" + case TypeInt: + return "INT" + case TypeLong: + return "LONG" + case TypeFloat: + return "FLOAT" + case TypeDouble: + return "DOUBLE" + case TypeBoolean: 
+ return "BOOLEAN" + case TypeNull: + return "NULL" + } + return "" +} + +// RuleAction represents a rule action +type RuleAction interface { + RuleBase + Run(ctx RuleContext, msg interface{}, err error) error +} + +// ErrorAction represents an error action +type ErrorAction struct { +} + +// NoneAction represents a no-op action +type NoneAction struct { +} + +// RuleConditionErr represents a rule condition error +type RuleConditionErr struct { + Rule *schemaregistry.Rule + Err error +} + +// Error returns the error message +func (re RuleConditionErr) Error() string { + errMsg := re.Rule.Doc + if errMsg == "" { + if re.Rule.Expr != "" { + return "Expr failed: '" + re.Rule.Expr + "'" + } + return "Condition failed: '" + re.Rule.Name + "'" + } + return errMsg +} + +// ConfigureSerializer configures the Serializer +func (s *BaseSerializer) ConfigureSerializer(client schemaregistry.Client, serdeType Type, + conf *SerializerConfig) error { + if client == nil { + return fmt.Errorf("schema registry client missing") + } + s.Client = client + s.Conf = conf + s.SerdeType = serdeType + s.SubjectNameStrategy = TopicNameStrategy + return nil +} + +// ConfigureDeserializer configures the Deserializer +func (s *BaseDeserializer) ConfigureDeserializer(client schemaregistry.Client, serdeType Type, + conf *DeserializerConfig) error { + if client == nil { + return fmt.Errorf("schema registry client missing") + } + s.Client = client + s.Conf = conf + s.SerdeType = serdeType + s.SubjectNameStrategy = TopicNameStrategy + return nil +} + +// SubjectNameStrategyFunc determines the subject for the given parameters +type SubjectNameStrategyFunc func(topic string, serdeType Type, schema schemaregistry.SchemaInfo) (string, error) + +// TopicNameStrategy creates a subject name by appending -[key|value] to the topic name. 
+func TopicNameStrategy(topic string, serdeType Type, schema schemaregistry.SchemaInfo) (string, error) { + suffix := "-value" + if serdeType == KeySerde { + suffix = "-key" + } + return topic + suffix, nil +} + +// GetID returns a schema ID for the given schema +func (s *BaseSerializer) GetID(topic string, msg interface{}, info *schemaregistry.SchemaInfo) (int, error) { + autoRegister := s.Conf.AutoRegisterSchemas + useSchemaID := s.Conf.UseSchemaID + useLatestWithMetadata := s.Conf.UseLatestWithMetadata + useLatest := s.Conf.UseLatestVersion + normalizeSchema := s.Conf.NormalizeSchemas + + var id = -1 + subject, err := s.SubjectNameStrategy(topic, s.SerdeType, *info) + if err != nil { + return -1, err + } + if autoRegister { + id, err = s.Client.Register(subject, *info, normalizeSchema) + if err != nil { + return -1, err + } + } else if useSchemaID >= 0 { + *info, err = s.Client.GetBySubjectAndID(subject, useSchemaID) + if err != nil { + return -1, err + } + id = useSchemaID + } else if len(useLatestWithMetadata) != 0 { + metadata, err := s.Client.GetLatestWithMetadata(subject, useLatestWithMetadata, true) + if err != nil { + return -1, err + } + *info = metadata.SchemaInfo + id = metadata.ID + } else if useLatest { + metadata, err := s.Client.GetLatestSchemaMetadata(subject) + if err != nil { + return -1, err + } + *info = metadata.SchemaInfo + id = metadata.ID + } else { + id, err = s.Client.GetID(subject, *info, normalizeSchema) + if err != nil { + return -1, err + } + } + return id, nil +} + +// SetRuleRegistry sets the rule registry +func (s *Serde) SetRuleRegistry(registry *RuleRegistry, ruleConfig map[string]string) error { + s.RuleRegistry = registry + for _, rule := range registry.GetExecutors() { + err := rule.Configure(s.Client.Config(), ruleConfig) + if err != nil { + return err + } + } + return nil +} + +// GetMigrations returns the migration rules for the given subject +func (s *Serde) GetMigrations(subject string, topic string, sourceInfo *schemaregistry.SchemaInfo, + target *schemaregistry.SchemaMetadata, msg interface{}) ([]Migration, error) { + version, err := s.Client.GetVersion(subject, *sourceInfo, false) + if err != nil { + return nil, err + } + source := &schemaregistry.SchemaMetadata{ + SchemaInfo: *sourceInfo, + Version: version, + } + var migrationMode schemaregistry.RuleMode + var migrations []Migration + var first *schemaregistry.SchemaMetadata + var last *schemaregistry.SchemaMetadata + if source.Version < target.Version { + migrationMode = schemaregistry.Upgrade + first = source + last = target + } else if source.Version > target.Version { + migrationMode = schemaregistry.Downgrade + first = target + last = source + } else { + return migrations, nil + } + var previous *schemaregistry.SchemaMetadata + versions, err := s.getSchemasBetween(subject, first, last) + if err != nil { + return nil, err + } + for i, version := range versions { + if i == 0 { + previous = version + continue + } + if version.RuleSet != nil && version.RuleSet.HasRules(migrationMode) { + var m Migration + if migrationMode == schemaregistry.Upgrade { + m = Migration{ + RuleMode: migrationMode, + Source: previous, + Target: version, + } + } else { + m = Migration{ + RuleMode: migrationMode, + Source: version, + Target: previous, + } + } + migrations = append(migrations, m) + } + previous = version + } + if migrationMode == schemaregistry.Downgrade { + // Reverse the order of migrations for symmetry + for i, j := 0, len(migrations)-1; i < j; i, j = i+1, j-1 { + migrations[i], migrations[j] = 
migrations[j], migrations[i] + } + } + return migrations, nil +} + +func (s *Serde) getSchemasBetween(subject string, first *schemaregistry.SchemaMetadata, + last *schemaregistry.SchemaMetadata) ([]*schemaregistry.SchemaMetadata, error) { + if last.Version-first.Version <= 1 { + return []*schemaregistry.SchemaMetadata{first, last}, nil + } + version1 := first.Version + version2 := last.Version + result := []*schemaregistry.SchemaMetadata{first} + for i := version1 + 1; i < version2; i++ { + meta, err := s.Client.GetSchemaMetadataIncludeDeleted(subject, i, true) + if err != nil { + return nil, err + } + result = append(result, &meta) + } + result = append(result, last) + return result, nil +} + +// Migration represents a migration +type Migration struct { + RuleMode schemaregistry.RuleMode + Source *schemaregistry.SchemaMetadata + Target *schemaregistry.SchemaMetadata +} + +// ExecuteMigrations executes the given migrations +func (s *Serde) ExecuteMigrations(migrations []Migration, subject string, topic string, msg interface{}) (interface{}, error) { + var err error + for _, migration := range migrations { + msg, err = s.ExecuteRules(subject, topic, migration.RuleMode, + &migration.Source.SchemaInfo, &migration.Target.SchemaInfo, msg) + if err != nil { + return nil, err + } + } + return msg, nil +} + +// ExecuteRules executes the given rules +func (s *Serde) ExecuteRules(subject string, topic string, ruleMode schemaregistry.RuleMode, + source *schemaregistry.SchemaInfo, target *schemaregistry.SchemaInfo, msg interface{}) (interface{}, error) { + if msg == nil || target == nil { + return msg, nil + } + var rules []schemaregistry.Rule + switch ruleMode { + case schemaregistry.Upgrade: + if target.RuleSet != nil { + rules = target.RuleSet.MigrationRules + } + case schemaregistry.Downgrade: + if source.RuleSet != nil { + // Execute downgrade rules in reverse order for symmetry + rules = reverseRules(source.RuleSet.MigrationRules) + } + default: + if target.RuleSet != nil { + rules = target.RuleSet.DomainRules + if ruleMode == schemaregistry.Read { + // Execute read rules in reverse order for symmetry + rules = reverseRules(rules) + } + } + } + for i, rule := range rules { + if rule.Disabled { + continue + } + mode, ok := schemaregistry.ParseMode(rule.Mode) + if !ok { + continue + } + switch mode { + case schemaregistry.WriteRead: + if ruleMode != schemaregistry.Write && ruleMode != schemaregistry.Read { + continue + } + case schemaregistry.UpDown: + if ruleMode != schemaregistry.Upgrade && ruleMode != schemaregistry.Downgrade { + continue + } + default: + if mode != ruleMode { + continue + } + } + ctx := RuleContext{ + Source: source, + Target: target, + Subject: subject, + Topic: topic, + IsKey: s.SerdeType == KeySerde, + RuleMode: ruleMode, + Rule: &rule, + Index: i, + Rules: rules, + FieldTransformer: s.FieldTransformer, + } + ruleExecutor := s.RuleRegistry.GetExecutor(rule.Type) + if ruleExecutor == nil { + err := s.runAction(ctx, ruleMode, rule, rule.OnFailure, msg, + fmt.Errorf("could not find rule executor of type %s", rule.Type), "ERROR") + if err != nil { + return nil, err + } + return msg, nil + } + var err error + result, err := ruleExecutor.Transform(ctx, msg) + if result == nil || err != nil { + err = s.runAction(ctx, ruleMode, rule, rule.OnFailure, msg, err, "ERROR") + if err != nil { + return nil, err + } + } else { + switch rule.Kind { + case "CONDITION": + condResult, ok2 := result.(bool) + if ok2 && !condResult { + err = s.runAction(ctx, ruleMode, rule, rule.OnFailure, msg, 
err, "ERROR") + if err != nil { + return nil, RuleConditionErr{ + Rule: ctx.Rule, + Err: err, + } + } + } + case "TRANSFORM": + msg = result + } + // ignore error, since rule succeeded + _ = s.runAction(ctx, ruleMode, rule, rule.OnSuccess, msg, nil, "NONE") + } + } + return msg, nil +} + +func reverseRules(rules []schemaregistry.Rule) []schemaregistry.Rule { + newRules := make([]schemaregistry.Rule, len(rules)) + copy(newRules, rules) + // Execute downgrade rules in reverse order for symmetry + for i, j := 0, len(newRules)-1; i < j; i, j = i+1, j-1 { + newRules[i], newRules[j] = newRules[j], newRules[i] + } + return newRules +} + +func (s *Serde) runAction(ctx RuleContext, ruleMode schemaregistry.RuleMode, rule schemaregistry.Rule, + action string, msg interface{}, err error, defaultAction string) error { + actionName := s.getRuleActionName(rule, ruleMode, action) + if actionName == nil { + actionName = &defaultAction + } + ruleAction := s.getRuleAction(ctx, *actionName) + if ruleAction == nil { + log.Printf("could not find rule action of type %s", *actionName) + return fmt.Errorf("could not find rule action of type %s", *actionName) + } + e := ruleAction.Run(ctx, msg, err) + if e != nil { + log.Printf("WARN: could not run post-rule action %s: %v", *actionName, e) + return e + } + return nil +} + +func (s *Serde) getRuleActionName(rule schemaregistry.Rule, ruleMode schemaregistry.RuleMode, actionName string) *string { + if actionName == "" { + return nil + } + mode, ok := schemaregistry.ParseMode(rule.Mode) + if !ok { + return nil + } + if (mode == schemaregistry.WriteRead || mode == schemaregistry.UpDown) && strings.Contains(actionName, ",") { + parts := strings.Split(actionName, ",") + switch ruleMode { + case schemaregistry.Write, schemaregistry.Upgrade: + return &parts[0] + case schemaregistry.Read, schemaregistry.Downgrade: + return &parts[1] + default: + return nil + } + } + return &actionName +} + +func (s *Serde) getRuleAction(_ RuleContext, actionName string) RuleAction { + if actionName == "ERROR" { + return ErrorAction{} + } else if actionName == "NONE" { + return NoneAction{} + } else { + return s.RuleRegistry.GetAction(actionName) + } +} + +// WriteBytes writes the serialized payload prepended by the MagicByte +func (s *BaseSerializer) WriteBytes(id int, msgBytes []byte) ([]byte, error) { + var buf bytes.Buffer + err := buf.WriteByte(MagicByte) + if err != nil { + return nil, err + } + idBytes := make([]byte, 4) + binary.BigEndian.PutUint32(idBytes, uint32(id)) + _, err = buf.Write(idBytes) + if err != nil { + return nil, err + } + _, err = buf.Write(msgBytes) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// GetSchema returns a schema for a payload +func (s *BaseDeserializer) GetSchema(topic string, payload []byte) (schemaregistry.SchemaInfo, error) { + info := schemaregistry.SchemaInfo{} + if payload[0] != MagicByte { + return info, fmt.Errorf("unknown magic byte") + } + id := binary.BigEndian.Uint32(payload[1:5]) + subject, err := s.SubjectNameStrategy(topic, s.SerdeType, info) + if err != nil { + return info, err + } + return s.Client.GetBySubjectAndID(subject, int(id)) +} + +// GetReaderSchema returns a schema for reading +func (s *BaseDeserializer) GetReaderSchema(subject string) (*schemaregistry.SchemaMetadata, error) { + useLatestWithMetadata := s.Conf.UseLatestWithMetadata + useLatest := s.Conf.UseLatestVersion + if len(useLatestWithMetadata) != 0 { + meta, err := s.Client.GetLatestWithMetadata(subject, useLatestWithMetadata, true) + if err != 
nil { + return nil, err + } + return &meta, nil + } + if useLatest { + meta, err := s.Client.GetLatestSchemaMetadata(subject) + if err != nil { + return nil, err + } + return &meta, nil + } + return nil, nil +} + +// ResolveReferences resolves schema references +func ResolveReferences(c schemaregistry.Client, schema schemaregistry.SchemaInfo, deps map[string]string) error { + for _, ref := range schema.References { + metadata, err := c.GetSchemaMetadataIncludeDeleted(ref.Subject, ref.Version, true) + if err != nil { + return err + } + info := metadata.SchemaInfo + deps[ref.Name] = metadata.Schema + err = ResolveReferences(c, info, deps) + if err != nil { + return err + } + } + return nil +} + +// Close closes the Serde +func (s *Serde) Close() error { + return nil +} + +// Configure configures the action +func (a ErrorAction) Configure(clientConfig *schemaregistry.Config, config map[string]string) error { + return nil +} + +// Type returns the type +func (a ErrorAction) Type() string { + return "ERROR" +} + +// Run runs the action +func (a ErrorAction) Run(ctx RuleContext, msg interface{}, err error) error { + return fmt.Errorf("rule %s failed: %w", ctx.Rule.Name, err) +} + +// Close closes the action +func (a ErrorAction) Close() error { + return nil +} + +// Configure configures the action +func (a NoneAction) Configure(clientConfig *schemaregistry.Config, config map[string]string) error { + return nil +} + +// Type returns the type +func (a NoneAction) Type() string { + return "NONE" +} + +// Run runs the action +func (a NoneAction) Run(ctx RuleContext, msg interface{}, err error) error { + return nil +} + +// Close closes the action +func (a NoneAction) Close() error { + return nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/testhelpers.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/testhelpers.go new file mode 100644 index 00000000..9ab0195b --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/testhelpers.go @@ -0,0 +1,73 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package serde + +import ( + "fmt" + "reflect" + "runtime" + "testing" +) + +// FailFunc is a function to call in case of failure +type FailFunc func(string, ...error) + +// MaybeFail represents a fail function +var MaybeFail FailFunc + +// InitFailFunc returns an initial fail function +func InitFailFunc(t *testing.T) FailFunc { + tester := t + return func(msg string, errors ...error) { + for _, err := range errors { + if err != nil { + pc := make([]uintptr, 1) + runtime.Callers(2, pc) + caller := runtime.FuncForPC(pc[0]) + _, line := caller.FileLine(caller.Entry()) + + tester.Fatalf("%s:%d failed: %s %s", caller.Name(), line, msg, err) + } + } + } +} + +// InitFailFuncBenchmark returns an initial fail function +func InitFailFuncBenchmark(b *testing.B) FailFunc { + tester := b + return func(msg string, errors ...error) { + for _, err := range errors { + if err != nil { + pc := make([]uintptr, 1) + runtime.Callers(2, pc) + caller := runtime.FuncForPC(pc[0]) + _, line := caller.FileLine(caller.Entry()) + + tester.Fatalf("%s:%d failed: %s %s", caller.Name(), line, msg, err) + } + } + } +} + +// Expect compares the actual and expected values +func Expect(actual, expected interface{}) error { + if !reflect.DeepEqual(actual, expected) { + return fmt.Errorf("expected: %v, actual: %v", expected, actual) + } + + return nil +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/wildcard_matcher.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/wildcard_matcher.go new file mode 100644 index 00000000..a88acbd6 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/wildcard_matcher.go @@ -0,0 +1,104 @@ +/** + * Copyright 2024 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package serde + +import ( + "regexp" + "strings" +) + +/** + * Matches fully-qualified names that use dot (.) as the name boundary. + * + *

A '?' matches a single character. + * A '*' matches one or more characters within a name boundary. + * A '**' matches one or more characters across name boundaries. + * + * Examples: + *
+ * wildcardMatch("eve", "eve*")                  --> true
+ * wildcardMatch("alice.bob.eve", "a*.bob.eve")  --> true
+ * wildcardMatch("alice.bob.eve", "a*.bob.e*")   --> true
+ * wildcardMatch("alice.bob.eve", "a*")          --> false
+ * wildcardMatch("alice.bob.eve", "a**")         --> true
+ * wildcardMatch("alice.bob.eve", "alice.bob*")  --> false
+ * wildcardMatch("alice.bob.eve", "alice.bob**") --> true
+ *
+ * + * @param str the string to match on + * @param wildcardMatcher the wildcard string to match against + * @return true if the string matches the wildcard string + */ +func match(str string, wildcardMatcher string) bool { + re := wildcardToRegexp(wildcardMatcher, '.') + pattern, err := regexp.Compile(re) + if err != nil { + return false + } + idx := pattern.FindStringIndex(str) + return idx != nil && idx[0] == 0 && idx[1] == len(str) +} + +func wildcardToRegexp(globExp string, separator rune) string { + var dst strings.Builder + chars := strings.ReplaceAll(globExp, "**"+string(separator)+"*", "**") + src := []rune(chars) + i, size := 0, len(src) + for i < size { + c := src[i] + i++ + switch c { + case '*': + // One char lookahead for ** + if i < size && src[i] == '*' { + dst.WriteString(".*") + i++ + } else { + dst.WriteString("[^") + dst.WriteRune(separator) + dst.WriteString("]*") + } + case '?': + dst.WriteString("[^") + dst.WriteRune(separator) + dst.WriteString("]") + case '.', '+', '{', '}', '(', ')', '|', '^', '$': + // These need to be escaped in regular expressions + dst.WriteRune('\\') + dst.WriteRune(c) + case '\\': + i = doubleSlashes(&dst, src, i) + default: + dst.WriteRune(c) + } + } + return dst.String() +} + +// doubleSlashes takes a pointer so the escaped runes reach the caller's builder; +// writing to a strings.Builder passed by value would be lost (and panics once the builder is non-empty). +func doubleSlashes(dst *strings.Builder, src []rune, i int) int { + // Emit the next character without special interpretation + dst.WriteRune('\\') + if i+1 < len(src) { + dst.WriteRune('\\') + dst.WriteRune(src[i]) + i++ + } else { + // A backslash at the very end is treated like an escaped backslash + dst.WriteRune('\\') + } + return i +} diff --git a/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/testhelpers.go b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/testhelpers.go new file mode 100644 index 00000000..60beb955 --- /dev/null +++ b/vendor/github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/testhelpers.go @@ -0,0 +1,104 @@ +/** + * Copyright 2022 Confluent Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package schemaregistry + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "runtime" + "testing" +) + +type testConf map[string]interface{} +type failFunc func(string, ...error) + +var testconf = make(testConf) +var srClient Client +var maybeFail failFunc + +// NewTestConf reads the test suite config file testconf.json which must +// contain at least Brokers and Topic string properties.
+// Returns Testconf if the testconf was found and usable, +// error if file can't be read correctly +func testconfRead() bool { + cf, err := os.Open("../kafka/testconf.json") + defer cf.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "%s testconf.json not found - ignoring test\n", err) + return false + } + + jp := json.NewDecoder(cf) + err = jp.Decode(&testconf) + + if err != nil { + panic(fmt.Sprintf("Failed to parse testconf: %s", err)) + } + + return true +} + +// getObject returns a child object of the root testConf +func (tc testConf) getObject(name string) testConf { + return tc[name].(map[string]interface{}) +} + +// getString returns a string representation of the value represented by key from the provided namespace +// if the namespace is an empty string the root object will be searched. +func (tc testConf) getString(key string) string { + val, ok := tc[key] + if ok { + return val.(string) + } + return "" +} + +// getInt returns an integer representation of the value represented by key from the provided namespace +// If the namespace is an empty string the root object will be searched. +func (tc testConf) getInt(key string) int { + val, ok := tc[key] + if ok { + return val.(int) + } + return 0 +} + +func initFailFunc(t *testing.T) failFunc { + tester := t + return func(msg string, errors ...error) { + for _, err := range errors { + if err != nil { + pc := make([]uintptr, 1) + runtime.Callers(2, pc) + caller := runtime.FuncForPC(pc[0]) + _, line := caller.FileLine(caller.Entry()) + + tester.Fatalf("%s:%d failed: %s %s", caller.Name(), line, msg, err) + } + } + } +} + +func expect(actual, expected interface{}) error { + if !reflect.DeepEqual(actual, expected) { + return fmt.Errorf("expected: %v, Actual: %v", expected, actual) + } + + return nil +} diff --git a/vendor/github.com/grafana/regexp/.gitignore b/vendor/github.com/grafana/regexp/.gitignore new file mode 100644 index 00000000..66fd13c9 --- /dev/null +++ b/vendor/github.com/grafana/regexp/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/grafana/regexp/LICENSE b/vendor/github.com/grafana/regexp/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/grafana/regexp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grafana/regexp/README.md b/vendor/github.com/grafana/regexp/README.md new file mode 100644 index 00000000..756e60dc --- /dev/null +++ b/vendor/github.com/grafana/regexp/README.md @@ -0,0 +1,12 @@ +# Grafana Go regexp package +This repo is a fork of the upstream Go `regexp` package, with some code optimisations to make it run faster. + +All the optimisations have been submitted upstream, but not yet merged. + +All semantics are the same, and the optimised code passes all tests from upstream. + +The `main` branch is non-optimised: switch over to [`speedup`](https://github.com/grafana/regexp/tree/speedup) branch for the improved code. + +## Benchmarks: + +![image](https://user-images.githubusercontent.com/8125524/152182951-856549ed-6044-4285-b799-69b31f598e32.png) diff --git a/vendor/github.com/grafana/regexp/backtrack.go b/vendor/github.com/grafana/regexp/backtrack.go new file mode 100644 index 00000000..7c37c66a --- /dev/null +++ b/vendor/github.com/grafana/regexp/backtrack.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// backtrack is a regular expression search with submatch +// tracking for small regular expressions and texts. It allocates +// a bit vector with (length of input) * (length of prog) bits, +// to make sure it never explores the same (character position, instruction) +// state multiple times. This limits the search to run in time linear in +// the length of the test. +// +// backtrack is a fast replacement for the NFA code on small +// regexps when onepass cannot be used. + +package regexp + +import ( + "regexp/syntax" + "sync" +) + +// A job is an entry on the backtracker's job stack. It holds +// the instruction pc and the position in the input. +type job struct { + pc uint32 + arg bool + pos int +} + +const ( + visitedBits = 32 + maxBacktrackProg = 500 // len(prog.Inst) <= max + maxBacktrackVector = 256 * 1024 // bit vector size <= max (bits) +) + +// bitState holds state for the backtracker. +type bitState struct { + end int + cap []int + matchcap []int + jobs []job + visited []uint32 + + inputs inputs +} + +var bitStatePool sync.Pool + +func newBitState() *bitState { + b, ok := bitStatePool.Get().(*bitState) + if !ok { + b = new(bitState) + } + return b +} + +func freeBitState(b *bitState) { + b.inputs.clear() + bitStatePool.Put(b) +} + +// maxBitStateLen returns the maximum length of a string to search with +// the backtracker using prog. +func maxBitStateLen(prog *syntax.Prog) int { + if !shouldBacktrack(prog) { + return 0 + } + return maxBacktrackVector / len(prog.Inst) +} + +// shouldBacktrack reports whether the program is too +// long for the backtracker to run. +func shouldBacktrack(prog *syntax.Prog) bool { + return len(prog.Inst) <= maxBacktrackProg +} + +// reset resets the state of the backtracker. +// end is the end position in the input. +// ncap is the number of captures. 
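+// The visited bit vector is sized to len(prog.Inst)*(end+1) bits, rounded up to whole 32-bit words.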
+func (b *bitState) reset(prog *syntax.Prog, end int, ncap int) { + b.end = end + + if cap(b.jobs) == 0 { + b.jobs = make([]job, 0, 256) + } else { + b.jobs = b.jobs[:0] + } + + visitedSize := (len(prog.Inst)*(end+1) + visitedBits - 1) / visitedBits + if cap(b.visited) < visitedSize { + b.visited = make([]uint32, visitedSize, maxBacktrackVector/visitedBits) + } else { + b.visited = b.visited[:visitedSize] + clear(b.visited) // set to 0 + } + + if cap(b.cap) < ncap { + b.cap = make([]int, ncap) + } else { + b.cap = b.cap[:ncap] + } + for i := range b.cap { + b.cap[i] = -1 + } + + if cap(b.matchcap) < ncap { + b.matchcap = make([]int, ncap) + } else { + b.matchcap = b.matchcap[:ncap] + } + for i := range b.matchcap { + b.matchcap[i] = -1 + } +} + +// shouldVisit reports whether the combination of (pc, pos) has not +// been visited yet. +func (b *bitState) shouldVisit(pc uint32, pos int) bool { + n := uint(int(pc)*(b.end+1) + pos) + if b.visited[n/visitedBits]&(1<<(n&(visitedBits-1))) != 0 { + return false + } + b.visited[n/visitedBits] |= 1 << (n & (visitedBits - 1)) + return true +} + +// push pushes (pc, pos, arg) onto the job stack if it should be +// visited. +func (b *bitState) push(re *Regexp, pc uint32, pos int, arg bool) { + // Only check shouldVisit when arg is false. + // When arg is true, we are continuing a previous visit. + if re.prog.Inst[pc].Op != syntax.InstFail && (arg || b.shouldVisit(pc, pos)) { + b.jobs = append(b.jobs, job{pc: pc, arg: arg, pos: pos}) + } +} + +// tryBacktrack runs a backtracking search starting at pos. +func (re *Regexp) tryBacktrack(b *bitState, i input, pc uint32, pos int) bool { + longest := re.longest + + b.push(re, pc, pos, false) + for len(b.jobs) > 0 { + l := len(b.jobs) - 1 + // Pop job off the stack. + pc := b.jobs[l].pc + pos := b.jobs[l].pos + arg := b.jobs[l].arg + b.jobs = b.jobs[:l] + + // Optimization: rather than push and pop, + // code that is going to Push and continue + // the loop simply updates ip, p, and arg + // and jumps to CheckAndLoop. We have to + // do the ShouldVisit check that Push + // would have, but we avoid the stack + // manipulation. + goto Skip + CheckAndLoop: + if !b.shouldVisit(pc, pos) { + continue + } + Skip: + + inst := &re.prog.Inst[pc] + + switch inst.Op { + default: + panic("bad inst") + case syntax.InstFail: + panic("unexpected InstFail") + case syntax.InstAlt: + // Cannot just + // b.push(inst.Out, pos, false) + // b.push(inst.Arg, pos, false) + // If during the processing of inst.Out, we encounter + // inst.Arg via another path, we want to process it then. + // Pushing it here will inhibit that. Instead, re-push + // inst with arg==true as a reminder to push inst.Arg out + // later. + if arg { + // Finished inst.Out; try inst.Arg. + arg = false + pc = inst.Arg + goto CheckAndLoop + } else { + b.push(re, pc, pos, true) + pc = inst.Out + goto CheckAndLoop + } + + case syntax.InstAltMatch: + // One opcode consumes runes; the other leads to match. + switch re.prog.Inst[inst.Out].Op { + case syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + // inst.Arg is the match. 
+ b.push(re, inst.Arg, pos, false) + pc = inst.Arg + pos = b.end + goto CheckAndLoop + } + // inst.Out is the match - non-greedy + b.push(re, inst.Out, b.end, false) + pc = inst.Out + goto CheckAndLoop + + case syntax.InstRune: + r, width := i.step(pos) + if !inst.MatchRune(r) { + continue + } + pos += width + pc = inst.Out + goto CheckAndLoop + + case syntax.InstRune1: + r, width := i.step(pos) + if r != inst.Rune[0] { + continue + } + pos += width + pc = inst.Out + goto CheckAndLoop + + case syntax.InstRuneAnyNotNL: + r, width := i.step(pos) + if r == '\n' || r == endOfText { + continue + } + pos += width + pc = inst.Out + goto CheckAndLoop + + case syntax.InstRuneAny: + r, width := i.step(pos) + if r == endOfText { + continue + } + pos += width + pc = inst.Out + goto CheckAndLoop + + case syntax.InstCapture: + if arg { + // Finished inst.Out; restore the old value. + b.cap[inst.Arg] = pos + continue + } else { + if inst.Arg < uint32(len(b.cap)) { + // Capture pos to register, but save old value. + b.push(re, pc, b.cap[inst.Arg], true) // come back when we're done. + b.cap[inst.Arg] = pos + } + pc = inst.Out + goto CheckAndLoop + } + + case syntax.InstEmptyWidth: + flag := i.context(pos) + if !flag.match(syntax.EmptyOp(inst.Arg)) { + continue + } + pc = inst.Out + goto CheckAndLoop + + case syntax.InstNop: + pc = inst.Out + goto CheckAndLoop + + case syntax.InstMatch: + // We found a match. If the caller doesn't care + // where the match is, no point going further. + if len(b.cap) == 0 { + return true + } + + // Record best match so far. + // Only need to check end point, because this entire + // call is only considering one start position. + if len(b.cap) > 1 { + b.cap[1] = pos + } + if old := b.matchcap[1]; old == -1 || (longest && pos > 0 && pos > old) { + copy(b.matchcap, b.cap) + } + + // If going for first match, we're done. + if !longest { + return true + } + + // If we used the entire text, no longer match is possible. + if pos == b.end { + return true + } + + // Otherwise, continue on in hope of a longer match. + continue + } + } + + return longest && len(b.matchcap) > 1 && b.matchcap[1] >= 0 +} + +// backtrack runs a backtracking search of prog on the input starting at pos. +func (re *Regexp) backtrack(ib []byte, is string, pos int, ncap int, dstCap []int) []int { + startCond := re.cond + if startCond == ^syntax.EmptyOp(0) { // impossible + return nil + } + if startCond&syntax.EmptyBeginText != 0 && pos != 0 { + // Anchored match, past beginning of text. + return nil + } + + b := newBitState() + i, end := b.inputs.init(nil, ib, is) + b.reset(re.prog, end, ncap) + + // Anchored search must start at the beginning of the input + if startCond&syntax.EmptyBeginText != 0 { + if len(b.cap) > 0 { + b.cap[0] = pos + } + if !re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { + freeBitState(b) + return nil + } + } else { + + // Unanchored search, starting from each possible text position. + // Notice that we have to try the empty string at the end of + // the text, so the loop condition is pos <= end, not pos < end. + // This looks like it's quadratic in the size of the text, + // but we are not clearing visited between calls to TrySearch, + // so no work is duplicated and it ends up still being linear. + width := -1 + for ; pos <= end && width != 0; pos += width { + if len(re.prefix) > 0 { + // Match requires literal prefix; fast search for it. 
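+ // index reports the offset of the literal prefix at or after pos, or a negative value if it does not occur again.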
+ advance := i.index(re, pos) + if advance < 0 { + freeBitState(b) + return nil + } + pos += advance + } + + if len(b.cap) > 0 { + b.cap[0] = pos + } + if re.tryBacktrack(b, i, uint32(re.prog.Start), pos) { + // Match must be leftmost; done. + goto Match + } + _, width = i.step(pos) + } + freeBitState(b) + return nil + } + +Match: + dstCap = append(dstCap, b.matchcap...) + freeBitState(b) + return dstCap +} diff --git a/vendor/github.com/grafana/regexp/exec.go b/vendor/github.com/grafana/regexp/exec.go new file mode 100644 index 00000000..3fc4b684 --- /dev/null +++ b/vendor/github.com/grafana/regexp/exec.go @@ -0,0 +1,554 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regexp + +import ( + "io" + "regexp/syntax" + "sync" +) + +// A queue is a 'sparse array' holding pending threads of execution. +// See https://research.swtch.com/2008/03/using-uninitialized-memory-for-fun-and.html +type queue struct { + sparse []uint32 + dense []entry +} + +// An entry is an entry on a queue. +// It holds both the instruction pc and the actual thread. +// Some queue entries are just place holders so that the machine +// knows it has considered that pc. Such entries have t == nil. +type entry struct { + pc uint32 + t *thread +} + +// A thread is the state of a single path through the machine: +// an instruction and a corresponding capture array. +// See https://swtch.com/~rsc/regexp/regexp2.html +type thread struct { + inst *syntax.Inst + cap []int +} + +// A machine holds all the state during an NFA simulation for p. +type machine struct { + re *Regexp // corresponding Regexp + p *syntax.Prog // compiled program + q0, q1 queue // two queues for runq, nextq + pool []*thread // pool of available threads + matched bool // whether a match was found + matchcap []int // capture information for the match + + inputs inputs +} + +type inputs struct { + // cached inputs, to avoid allocation + bytes inputBytes + string inputString + reader inputReader +} + +func (i *inputs) newBytes(b []byte) input { + i.bytes.str = b + return &i.bytes +} + +func (i *inputs) newString(s string) input { + i.string.str = s + return &i.string +} + +func (i *inputs) newReader(r io.RuneReader) input { + i.reader.r = r + i.reader.atEOT = false + i.reader.pos = 0 + return &i.reader +} + +func (i *inputs) clear() { + // We need to clear 1 of these. + // Avoid the expense of clearing the others (pointer write barrier). + if i.bytes.str != nil { + i.bytes.str = nil + } else if i.reader.r != nil { + i.reader.r = nil + } else { + i.string.str = "" + } +} + +func (i *inputs) init(r io.RuneReader, b []byte, s string) (input, int) { + if r != nil { + return i.newReader(r), 0 + } + if b != nil { + return i.newBytes(b), len(b) + } + return i.newString(s), len(s) +} + +func (m *machine) init(ncap int) { + for _, t := range m.pool { + t.cap = t.cap[:ncap] + } + m.matchcap = m.matchcap[:ncap] +} + +// alloc allocates a new thread with the given instruction. +// It uses the free pool if possible. +func (m *machine) alloc(i *syntax.Inst) *thread { + var t *thread + if n := len(m.pool); n > 0 { + t = m.pool[n-1] + m.pool = m.pool[:n-1] + } else { + t = new(thread) + t.cap = make([]int, len(m.matchcap), cap(m.matchcap)) + } + t.inst = i + return t +} + +// A lazyFlag is a lazily-evaluated syntax.EmptyOp, +// for checking zero-width flags like ^ $ \A \z \B \b. 
+// It records the pair of relevant runes and does not +// determine the implied flags until absolutely necessary +// (most of the time, that means never). +type lazyFlag uint64 + +func newLazyFlag(r1, r2 rune) lazyFlag { + return lazyFlag(uint64(r1)<<32 | uint64(uint32(r2))) +} + +func (f lazyFlag) match(op syntax.EmptyOp) bool { + if op == 0 { + return true + } + r1 := rune(f >> 32) + if op&syntax.EmptyBeginLine != 0 { + if r1 != '\n' && r1 >= 0 { + return false + } + op &^= syntax.EmptyBeginLine + } + if op&syntax.EmptyBeginText != 0 { + if r1 >= 0 { + return false + } + op &^= syntax.EmptyBeginText + } + if op == 0 { + return true + } + r2 := rune(f) + if op&syntax.EmptyEndLine != 0 { + if r2 != '\n' && r2 >= 0 { + return false + } + op &^= syntax.EmptyEndLine + } + if op&syntax.EmptyEndText != 0 { + if r2 >= 0 { + return false + } + op &^= syntax.EmptyEndText + } + if op == 0 { + return true + } + if syntax.IsWordChar(r1) != syntax.IsWordChar(r2) { + op &^= syntax.EmptyWordBoundary + } else { + op &^= syntax.EmptyNoWordBoundary + } + return op == 0 +} + +// match runs the machine over the input starting at pos. +// It reports whether a match was found. +// If so, m.matchcap holds the submatch information. +func (m *machine) match(i input, pos int) bool { + startCond := m.re.cond + if startCond == ^syntax.EmptyOp(0) { // impossible + return false + } + m.matched = false + for i := range m.matchcap { + m.matchcap[i] = -1 + } + runq, nextq := &m.q0, &m.q1 + r, r1 := endOfText, endOfText + width, width1 := 0, 0 + r, width = i.step(pos) + if r != endOfText { + r1, width1 = i.step(pos + width) + } + var flag lazyFlag + if pos == 0 { + flag = newLazyFlag(-1, r) + } else { + flag = i.context(pos) + } + for { + if len(runq.dense) == 0 { + if startCond&syntax.EmptyBeginText != 0 && pos != 0 { + // Anchored match, past beginning of text. + break + } + if m.matched { + // Have match; finished exploring alternatives. + break + } + if len(m.re.prefix) > 0 && r1 != m.re.prefixRune && i.canCheckPrefix() { + // Match requires literal prefix; fast search for it. + advance := i.index(m.re, pos) + if advance < 0 { + break + } + pos += advance + r, width = i.step(pos) + r1, width1 = i.step(pos + width) + } + } + if !m.matched { + if len(m.matchcap) > 0 { + m.matchcap[0] = pos + } + m.add(runq, uint32(m.p.Start), pos, m.matchcap, &flag, nil) + } + flag = newLazyFlag(r, r1) + m.step(runq, nextq, pos, pos+width, r, &flag) + if width == 0 { + break + } + if len(m.matchcap) == 0 && m.matched { + // Found a match and not paying attention + // to where it is, so any match will do. + break + } + pos += width + r, width = r1, width1 + if r != endOfText { + r1, width1 = i.step(pos + width) + } + runq, nextq = nextq, runq + } + m.clear(nextq) + return m.matched +} + +// clear frees all threads on the thread queue. +func (m *machine) clear(q *queue) { + for _, d := range q.dense { + if d.t != nil { + m.pool = append(m.pool, d.t) + } + } + q.dense = q.dense[:0] +} + +// step executes one step of the machine, running each of the threads +// on runq and appending new threads to nextq. +// The step processes the rune c (which may be endOfText), +// which starts at position pos and ends at nextPos. +// nextCond gives the setting for the empty-width flags after c. 
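+
+// Editor's illustration (not part of the upstream file): a minimal
+// sketch of how a lazyFlag answers a zero-width assertion from the two
+// runes surrounding a position. The helper name lazyFlagWordBoundary is
+// hypothetical, added for exposition only.
+func lazyFlagWordBoundary(prev, next rune) bool {
+ // newLazyFlag packs both runes; match decodes only the flags the
+ // caller actually asks about (here \b, i.e. EmptyWordBoundary).
+ return newLazyFlag(prev, next).match(syntax.EmptyWordBoundary)
+}
+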
+func (m *machine) step(runq, nextq *queue, pos, nextPos int, c rune, nextCond *lazyFlag) { + longest := m.re.longest + for j := 0; j < len(runq.dense); j++ { + d := &runq.dense[j] + t := d.t + if t == nil { + continue + } + if longest && m.matched && len(t.cap) > 0 && m.matchcap[0] < t.cap[0] { + m.pool = append(m.pool, t) + continue + } + i := t.inst + add := false + switch i.Op { + default: + panic("bad inst") + + case syntax.InstMatch: + if len(t.cap) > 0 && (!longest || !m.matched || m.matchcap[1] < pos) { + t.cap[1] = pos + copy(m.matchcap, t.cap) + } + if !longest { + // First-match mode: cut off all lower-priority threads. + for _, d := range runq.dense[j+1:] { + if d.t != nil { + m.pool = append(m.pool, d.t) + } + } + runq.dense = runq.dense[:0] + } + m.matched = true + + case syntax.InstRune: + add = i.MatchRune(c) + case syntax.InstRune1: + add = c == i.Rune[0] + case syntax.InstRuneAny: + add = true + case syntax.InstRuneAnyNotNL: + add = c != '\n' + } + if add { + t = m.add(nextq, i.Out, nextPos, t.cap, nextCond, t) + } + if t != nil { + m.pool = append(m.pool, t) + } + } + runq.dense = runq.dense[:0] +} + +// add adds an entry to q for pc, unless the q already has such an entry. +// It also recursively adds an entry for all instructions reachable from pc by following +// empty-width conditions satisfied by cond. pos gives the current position +// in the input. +func (m *machine) add(q *queue, pc uint32, pos int, cap []int, cond *lazyFlag, t *thread) *thread { +Again: + if pc == 0 { + return t + } + if j := q.sparse[pc]; j < uint32(len(q.dense)) && q.dense[j].pc == pc { + return t + } + + j := len(q.dense) + q.dense = q.dense[:j+1] + d := &q.dense[j] + d.t = nil + d.pc = pc + q.sparse[pc] = uint32(j) + + i := &m.p.Inst[pc] + switch i.Op { + default: + panic("unhandled") + case syntax.InstFail: + // nothing + case syntax.InstAlt, syntax.InstAltMatch: + t = m.add(q, i.Out, pos, cap, cond, t) + pc = i.Arg + goto Again + case syntax.InstEmptyWidth: + if cond.match(syntax.EmptyOp(i.Arg)) { + pc = i.Out + goto Again + } + case syntax.InstNop: + pc = i.Out + goto Again + case syntax.InstCapture: + if int(i.Arg) < len(cap) { + opos := cap[i.Arg] + cap[i.Arg] = pos + m.add(q, i.Out, pos, cap, cond, nil) + cap[i.Arg] = opos + } else { + pc = i.Out + goto Again + } + case syntax.InstMatch, syntax.InstRune, syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + if t == nil { + t = m.alloc(i) + } else { + t.inst = i + } + if len(cap) > 0 && &t.cap[0] != &cap[0] { + copy(t.cap, cap) + } + d.t = t + t = nil + } + return t +} + +type onePassMachine struct { + inputs inputs + matchcap []int +} + +var onePassPool sync.Pool + +func newOnePassMachine() *onePassMachine { + m, ok := onePassPool.Get().(*onePassMachine) + if !ok { + m = new(onePassMachine) + } + return m +} + +func freeOnePassMachine(m *onePassMachine) { + m.inputs.clear() + onePassPool.Put(m) +} + +// doOnePass implements r.doExecute using the one-pass execution engine. 
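+
+// Editor's illustration (not upstream): the sparse/dense pairing used
+// by the queue type above, shown in isolation. Membership tests stay
+// O(1) even though the sparse slice is never zeroed, because stale
+// entries fail the cross-check against dense. The helper name is
+// hypothetical.
+func sparseSetContains(sparse, dense []uint32, pc uint32) bool {
+ j := sparse[pc]
+ return j < uint32(len(dense)) && dense[j] == pc
+}
+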
+func (re *Regexp) doOnePass(ir io.RuneReader, ib []byte, is string, pos, ncap int, dstCap []int) []int { + startCond := re.cond + if startCond == ^syntax.EmptyOp(0) { // impossible + return nil + } + + m := newOnePassMachine() + if cap(m.matchcap) < ncap { + m.matchcap = make([]int, ncap) + } else { + m.matchcap = m.matchcap[:ncap] + } + + matched := false + for i := range m.matchcap { + m.matchcap[i] = -1 + } + + i, _ := m.inputs.init(ir, ib, is) + + r, r1 := endOfText, endOfText + width, width1 := 0, 0 + r, width = i.step(pos) + if r != endOfText { + r1, width1 = i.step(pos + width) + } + var flag lazyFlag + if pos == 0 { + flag = newLazyFlag(-1, r) + } else { + flag = i.context(pos) + } + pc := re.onepass.Start + inst := &re.onepass.Inst[pc] + // If there is a simple literal prefix, skip over it. + if pos == 0 && flag.match(syntax.EmptyOp(inst.Arg)) && + len(re.prefix) > 0 && i.canCheckPrefix() { + // Match requires literal prefix; fast search for it. + if !i.hasPrefix(re) { + goto Return + } + pos += len(re.prefix) + r, width = i.step(pos) + r1, width1 = i.step(pos + width) + flag = i.context(pos) + pc = int(re.prefixEnd) + } + for { + inst = &re.onepass.Inst[pc] + pc = int(inst.Out) + switch inst.Op { + default: + panic("bad inst") + case syntax.InstMatch: + matched = true + if len(m.matchcap) > 0 { + m.matchcap[0] = 0 + m.matchcap[1] = pos + } + goto Return + case syntax.InstRune: + if !inst.MatchRune(r) { + goto Return + } + case syntax.InstRune1: + if r != inst.Rune[0] { + goto Return + } + case syntax.InstRuneAny: + // Nothing + case syntax.InstRuneAnyNotNL: + if r == '\n' { + goto Return + } + // peek at the input rune to see which branch of the Alt to take + case syntax.InstAlt, syntax.InstAltMatch: + pc = int(onePassNext(inst, r)) + continue + case syntax.InstFail: + goto Return + case syntax.InstNop: + continue + case syntax.InstEmptyWidth: + if !flag.match(syntax.EmptyOp(inst.Arg)) { + goto Return + } + continue + case syntax.InstCapture: + if int(inst.Arg) < len(m.matchcap) { + m.matchcap[inst.Arg] = pos + } + continue + } + if width == 0 { + break + } + flag = newLazyFlag(r, r1) + pos += width + r, width = r1, width1 + if r != endOfText { + r1, width1 = i.step(pos + width) + } + } + +Return: + if !matched { + freeOnePassMachine(m) + return nil + } + + dstCap = append(dstCap, m.matchcap...) + freeOnePassMachine(m) + return dstCap +} + +// doMatch reports whether either r, b or s match the regexp. +func (re *Regexp) doMatch(r io.RuneReader, b []byte, s string) bool { + return re.doExecute(r, b, s, 0, 0, nil) != nil +} + +// doExecute finds the leftmost match in the input, appends the position +// of its subexpressions to dstCap and returns dstCap. +// +// nil is returned if no matches are found and non-nil if matches are found. +func (re *Regexp) doExecute(r io.RuneReader, b []byte, s string, pos int, ncap int, dstCap []int) []int { + if dstCap == nil { + // Make sure 'return dstCap' is non-nil. + dstCap = arrayNoInts[:0:0] + } + + if r == nil && len(b)+len(s) < re.minInputLen { + return nil + } + + if re.onepass != nil { + return re.doOnePass(r, b, s, pos, ncap, dstCap) + } + if r == nil && len(b)+len(s) < re.maxBitStateLen { + return re.backtrack(b, s, pos, ncap, dstCap) + } + + m := re.get() + i, _ := m.inputs.init(r, b, s) + + m.init(ncap) + if !m.match(i, pos) { + re.put(m) + return nil + } + + dstCap = append(dstCap, m.matchcap...) + re.put(m) + return dstCap +} + +// arrayNoInts is returned by doExecute match if nil dstCap is passed +// to it with ncap=0. 
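+
+// Editor's note (sketch, not upstream): doExecute distinguishes "no
+// match" (a nil slice) from "match found, zero captures requested" (a
+// non-nil empty slice). Slicing the zero-length array declared below
+// yields such a non-nil slice with len 0 and cap 0, without allocating.
+func emptyNonNilCaps() []int {
+ return arrayNoInts[:0:0]
+}
+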
+var arrayNoInts [0]int diff --git a/vendor/github.com/grafana/regexp/onepass.go b/vendor/github.com/grafana/regexp/onepass.go new file mode 100644 index 00000000..53cbd958 --- /dev/null +++ b/vendor/github.com/grafana/regexp/onepass.go @@ -0,0 +1,500 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package regexp + +import ( + "regexp/syntax" + "slices" + "strings" + "unicode" + "unicode/utf8" +) + +// "One-pass" regexp execution. +// Some regexps can be analyzed to determine that they never need +// backtracking: they are guaranteed to run in one pass over the string +// without bothering to save all the usual NFA state. +// Detect those and execute them more quickly. + +// A onePassProg is a compiled one-pass regular expression program. +// It is the same as syntax.Prog except for the use of onePassInst. +type onePassProg struct { + Inst []onePassInst + Start int // index of start instruction + NumCap int // number of InstCapture insts in re +} + +// A onePassInst is a single instruction in a one-pass regular expression program. +// It is the same as syntax.Inst except for the new 'Next' field. +type onePassInst struct { + syntax.Inst + Next []uint32 +} + +// onePassPrefix returns a literal string that all matches for the +// regexp must start with. Complete is true if the prefix +// is the entire match. Pc is the index of the last rune instruction +// in the string. The onePassPrefix skips over the mandatory +// EmptyBeginText. +func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) { + i := &p.Inst[p.Start] + if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 { + return "", i.Op == syntax.InstMatch, uint32(p.Start) + } + pc = i.Out + i = &p.Inst[pc] + for i.Op == syntax.InstNop { + pc = i.Out + i = &p.Inst[pc] + } + // Avoid allocation of buffer if prefix is empty. + if iop(i) != syntax.InstRune || len(i.Rune) != 1 { + return "", i.Op == syntax.InstMatch, uint32(p.Start) + } + + // Have prefix; gather characters. + var buf strings.Builder + for iop(i) == syntax.InstRune && len(i.Rune) == 1 && syntax.Flags(i.Arg)&syntax.FoldCase == 0 && i.Rune[0] != utf8.RuneError { + buf.WriteRune(i.Rune[0]) + pc, i = i.Out, &p.Inst[i.Out] + } + if i.Op == syntax.InstEmptyWidth && + syntax.EmptyOp(i.Arg)&syntax.EmptyEndText != 0 && + p.Inst[i.Out].Op == syntax.InstMatch { + complete = true + } + return buf.String(), complete, pc +} + +// onePassNext selects the next actionable state of the prog, based on the input character. +// It should only be called when i.Op == InstAlt or InstAltMatch, and from the one-pass machine. +// One of the alternates may ultimately lead without input to end of line. If the instruction +// is InstAltMatch the path to the InstMatch is in i.Out, the normal node in i.Next. +func onePassNext(i *onePassInst, r rune) uint32 { + next := i.MatchRunePos(r) + if next >= 0 { + return i.Next[next] + } + if i.Op == syntax.InstAltMatch { + return i.Out + } + return 0 +} + +func iop(i *syntax.Inst) syntax.InstOp { + op := i.Op + switch op { + case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + op = syntax.InstRune + } + return op +} + +// Sparse Array implementation is used as a queueOnePass. 
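+
+// Editor's illustration (not upstream): one-pass rune sets are flat
+// [lo, hi] pairs, and mergeRuneSets (defined below) interleaves two
+// non-intersecting sets while recording, per pair, the pc to jump to;
+// onePassNext then dispatches through MatchRunePos. Values here are
+// illustrative only.
+func mergeRuneSetsExample() ([]rune, []uint32) {
+ left := []rune{'a', 'a'}  // just the rune 'a', leading to pc 1
+ right := []rune{'b', 'b'} // just the rune 'b', leading to pc 2
+ return mergeRuneSets(&left, &right, 1, 2) // -> ['a' 'a' 'b' 'b'], [1 2]
+}
+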
+type queueOnePass struct { + sparse []uint32 + dense []uint32 + size, nextIndex uint32 +} + +func (q *queueOnePass) empty() bool { + return q.nextIndex >= q.size +} + +func (q *queueOnePass) next() (n uint32) { + n = q.dense[q.nextIndex] + q.nextIndex++ + return +} + +func (q *queueOnePass) clear() { + q.size = 0 + q.nextIndex = 0 +} + +func (q *queueOnePass) contains(u uint32) bool { + if u >= uint32(len(q.sparse)) { + return false + } + return q.sparse[u] < q.size && q.dense[q.sparse[u]] == u +} + +func (q *queueOnePass) insert(u uint32) { + if !q.contains(u) { + q.insertNew(u) + } +} + +func (q *queueOnePass) insertNew(u uint32) { + if u >= uint32(len(q.sparse)) { + return + } + q.sparse[u] = q.size + q.dense[q.size] = u + q.size++ +} + +func newQueue(size int) (q *queueOnePass) { + return &queueOnePass{ + sparse: make([]uint32, size), + dense: make([]uint32, size), + } +} + +// mergeRuneSets merges two non-intersecting runesets, and returns the merged result, +// and a NextIp array. The idea is that if a rune matches the OnePassRunes at index +// i, NextIp[i/2] is the target. If the input sets intersect, an empty runeset and a +// NextIp array with the single element mergeFailed is returned. +// The code assumes that both inputs contain ordered and non-intersecting rune pairs. +const mergeFailed = uint32(0xffffffff) + +var ( + noRune = []rune{} + noNext = []uint32{mergeFailed} +) + +func mergeRuneSets(leftRunes, rightRunes *[]rune, leftPC, rightPC uint32) ([]rune, []uint32) { + leftLen := len(*leftRunes) + rightLen := len(*rightRunes) + if leftLen&0x1 != 0 || rightLen&0x1 != 0 { + panic("mergeRuneSets odd length []rune") + } + var ( + lx, rx int + ) + merged := make([]rune, 0) + next := make([]uint32, 0) + ok := true + defer func() { + if !ok { + merged = nil + next = nil + } + }() + + ix := -1 + extend := func(newLow *int, newArray *[]rune, pc uint32) bool { + if ix > 0 && (*newArray)[*newLow] <= merged[ix] { + return false + } + merged = append(merged, (*newArray)[*newLow], (*newArray)[*newLow+1]) + *newLow += 2 + ix += 2 + next = append(next, pc) + return true + } + + for lx < leftLen || rx < rightLen { + switch { + case rx >= rightLen: + ok = extend(&lx, leftRunes, leftPC) + case lx >= leftLen: + ok = extend(&rx, rightRunes, rightPC) + case (*rightRunes)[rx] < (*leftRunes)[lx]: + ok = extend(&rx, rightRunes, rightPC) + default: + ok = extend(&lx, leftRunes, leftPC) + } + if !ok { + return noRune, noNext + } + } + return merged, next +} + +// cleanupOnePass drops working memory, and restores certain shortcut instructions. +func cleanupOnePass(prog *onePassProg, original *syntax.Prog) { + for ix, instOriginal := range original.Inst { + switch instOriginal.Op { + case syntax.InstAlt, syntax.InstAltMatch, syntax.InstRune: + case syntax.InstCapture, syntax.InstEmptyWidth, syntax.InstNop, syntax.InstMatch, syntax.InstFail: + prog.Inst[ix].Next = nil + case syntax.InstRune1, syntax.InstRuneAny, syntax.InstRuneAnyNotNL: + prog.Inst[ix].Next = nil + prog.Inst[ix] = onePassInst{Inst: instOriginal} + } + } +} + +// onePassCopy creates a copy of the original Prog, as we'll be modifying it. +func onePassCopy(prog *syntax.Prog) *onePassProg { + p := &onePassProg{ + Start: prog.Start, + NumCap: prog.NumCap, + Inst: make([]onePassInst, len(prog.Inst)), + } + for i, inst := range prog.Inst { + p.Inst[i] = onePassInst{Inst: inst} + } + + // rewrites one or more common Prog constructs that enable some otherwise + // non-onepass Progs to be onepass. 
A:BD (for example) means an InstAlt at
+ // ip A, that points to ips B & D.
+ // A:BC + B:DA => A:BC + B:DC
+ // A:BC + B:DC => A:DC + B:DC
+ for pc := range p.Inst {
+ switch p.Inst[pc].Op {
+ default:
+ continue
+ case syntax.InstAlt, syntax.InstAltMatch:
+ // A:Bx + B:Ay
+ p_A_Other := &p.Inst[pc].Out
+ p_A_Alt := &p.Inst[pc].Arg
+ // make sure a target is another Alt
+ instAlt := p.Inst[*p_A_Alt]
+ if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) {
+ p_A_Alt, p_A_Other = p_A_Other, p_A_Alt
+ instAlt = p.Inst[*p_A_Alt]
+ if !(instAlt.Op == syntax.InstAlt || instAlt.Op == syntax.InstAltMatch) {
+ continue
+ }
+ }
+ instOther := p.Inst[*p_A_Other]
+ // Analyzing both legs pointing to Alts is for another day
+ if instOther.Op == syntax.InstAlt || instOther.Op == syntax.InstAltMatch {
+ // too complicated
+ continue
+ }
+ // simple empty transition loop
+ // A:BC + B:DA => A:BC + B:DC
+ p_B_Alt := &p.Inst[*p_A_Alt].Out
+ p_B_Other := &p.Inst[*p_A_Alt].Arg
+ patch := false
+ if instAlt.Out == uint32(pc) {
+ patch = true
+ } else if instAlt.Arg == uint32(pc) {
+ patch = true
+ p_B_Alt, p_B_Other = p_B_Other, p_B_Alt
+ }
+ if patch {
+ *p_B_Alt = *p_A_Other
+ }
+
+ // empty transition to common target
+ // A:BC + B:DC => A:DC + B:DC
+ if *p_A_Other == *p_B_Alt {
+ *p_A_Alt = *p_B_Other
+ }
+ }
+ }
+ return p
+}
+
+var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune}
+var anyRune = []rune{0, unicode.MaxRune}
+
+// makeOnePass creates a onepass Prog, if possible. It is possible if at any alt,
+// the match engine can always tell which branch to take. The routine may modify
+// p if it is turned into a onepass Prog. If it isn't possible for this to be a
+// onepass Prog, the Prog nil is returned. makeOnePass is recursive
+// to the size of the Prog.
+func makeOnePass(p *onePassProg) *onePassProg {
+ // If the machine is very long, it's not worth the time to check if we can use one pass.
+ if len(p.Inst) >= 1000 {
+ return nil
+ }
+
+ var (
+ instQueue = newQueue(len(p.Inst))
+ visitQueue = newQueue(len(p.Inst))
+ check func(uint32, []bool) bool
+ onePassRunes = make([][]rune, len(p.Inst))
+ )
+
+ // check that paths from Alt instructions are unambiguous, and rebuild the new
+ // program as a onepass program
+ check = func(pc uint32, m []bool) (ok bool) {
+ ok = true
+ inst := &p.Inst[pc]
+ if visitQueue.contains(pc) {
+ return
+ }
+ visitQueue.insert(pc)
+ switch inst.Op {
+ case syntax.InstAlt, syntax.InstAltMatch:
+ ok = check(inst.Out, m) && check(inst.Arg, m)
+ // check no-input paths to InstMatch
+ matchOut := m[inst.Out]
+ matchArg := m[inst.Arg]
+ if matchOut && matchArg {
+ ok = false
+ break
+ }
+ // Match on empty goes in inst.Out
+ if matchArg {
+ inst.Out, inst.Arg = inst.Arg, inst.Out
+ matchOut, matchArg = matchArg, matchOut
+ }
+ if matchOut {
+ m[pc] = true
+ inst.Op = syntax.InstAltMatch
+ }
+
+ // build a dispatch operator from the two legs of the alt.
+ onePassRunes[pc], inst.Next = mergeRuneSets(
+ &onePassRunes[inst.Out], &onePassRunes[inst.Arg], inst.Out, inst.Arg)
+ if len(inst.Next) > 0 && inst.Next[0] == mergeFailed {
+ ok = false
+ break
+ }
+ case syntax.InstCapture, syntax.InstNop:
+ ok = check(inst.Out, m)
+ m[pc] = m[inst.Out]
+ // pass matching runes back through these no-ops.
+ onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...)
+ inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + case syntax.InstEmptyWidth: + ok = check(inst.Out, m) + m[pc] = m[inst.Out] + onePassRunes[pc] = append([]rune{}, onePassRunes[inst.Out]...) + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + case syntax.InstMatch, syntax.InstFail: + m[pc] = inst.Op == syntax.InstMatch + case syntax.InstRune: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + if len(inst.Rune) == 0 { + onePassRunes[pc] = []rune{} + inst.Next = []uint32{inst.Out} + break + } + runes := make([]rune, 0) + if len(inst.Rune) == 1 && syntax.Flags(inst.Arg)&syntax.FoldCase != 0 { + r0 := inst.Rune[0] + runes = append(runes, r0, r0) + for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { + runes = append(runes, r1, r1) + } + slices.Sort(runes) + } else { + runes = append(runes, inst.Rune...) + } + onePassRunes[pc] = runes + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + inst.Op = syntax.InstRune + case syntax.InstRune1: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + runes := []rune{} + // expand case-folded runes + if syntax.Flags(inst.Arg)&syntax.FoldCase != 0 { + r0 := inst.Rune[0] + runes = append(runes, r0, r0) + for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { + runes = append(runes, r1, r1) + } + slices.Sort(runes) + } else { + runes = append(runes, inst.Rune[0], inst.Rune[0]) + } + onePassRunes[pc] = runes + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + inst.Op = syntax.InstRune + case syntax.InstRuneAny: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + onePassRunes[pc] = append([]rune{}, anyRune...) + inst.Next = []uint32{inst.Out} + case syntax.InstRuneAnyNotNL: + m[pc] = false + if len(inst.Next) > 0 { + break + } + instQueue.insert(inst.Out) + onePassRunes[pc] = append([]rune{}, anyRuneNotNL...) + inst.Next = make([]uint32, len(onePassRunes[pc])/2+1) + for i := range inst.Next { + inst.Next[i] = inst.Out + } + } + return + } + + instQueue.clear() + instQueue.insert(uint32(p.Start)) + m := make([]bool, len(p.Inst)) + for !instQueue.empty() { + visitQueue.clear() + pc := instQueue.next() + if !check(pc, m) { + p = nil + break + } + } + if p != nil { + for i := range p.Inst { + p.Inst[i].Rune = onePassRunes[i] + } + } + return p +} + +// compileOnePass returns a new *syntax.Prog suitable for onePass execution if the original Prog +// can be recharacterized as a one-pass regexp program, or syntax.nil if the +// Prog cannot be converted. For a one pass prog, the fundamental condition that must +// be true is: at any InstAlt, there must be no ambiguity about what branch to take. 
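+
+// Editor's sketch (not upstream): probing which patterns qualify.
+// compileOnePass (below) insists on a ^-anchored program, so onepass
+// stays nil for unanchored patterns even when they are unambiguous.
+func onePassEligibility() (anchored, unanchored bool) {
+ anchored = MustCompile(`^abc$`).onepass != nil // true
+ unanchored = MustCompile(`abc`).onepass != nil // false
+ return
+}
+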
+func compileOnePass(prog *syntax.Prog) (p *onePassProg) { + if prog.Start == 0 { + return nil + } + // onepass regexp is anchored + if prog.Inst[prog.Start].Op != syntax.InstEmptyWidth || + syntax.EmptyOp(prog.Inst[prog.Start].Arg)&syntax.EmptyBeginText != syntax.EmptyBeginText { + return nil + } + // every instruction leading to InstMatch must be EmptyEndText + for _, inst := range prog.Inst { + opOut := prog.Inst[inst.Out].Op + switch inst.Op { + default: + if opOut == syntax.InstMatch { + return nil + } + case syntax.InstAlt, syntax.InstAltMatch: + if opOut == syntax.InstMatch || prog.Inst[inst.Arg].Op == syntax.InstMatch { + return nil + } + case syntax.InstEmptyWidth: + if opOut == syntax.InstMatch { + if syntax.EmptyOp(inst.Arg)&syntax.EmptyEndText == syntax.EmptyEndText { + continue + } + return nil + } + } + } + // Creates a slightly optimized copy of the original Prog + // that cleans up some Prog idioms that block valid onepass programs + p = onePassCopy(prog) + + // checkAmbiguity on InstAlts, build onepass Prog if possible + p = makeOnePass(p) + + if p != nil { + cleanupOnePass(p, prog) + } + return p +} diff --git a/vendor/github.com/grafana/regexp/regexp.go b/vendor/github.com/grafana/regexp/regexp.go new file mode 100644 index 00000000..d1218ad0 --- /dev/null +++ b/vendor/github.com/grafana/regexp/regexp.go @@ -0,0 +1,1304 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package regexp implements regular expression search. +// +// The syntax of the regular expressions accepted is the same +// general syntax used by Perl, Python, and other languages. +// More precisely, it is the syntax accepted by RE2 and described at +// https://golang.org/s/re2syntax, except for \C. +// For an overview of the syntax, see the [regexp/syntax] package. +// +// The regexp implementation provided by this package is +// guaranteed to run in time linear in the size of the input. +// (This is a property not guaranteed by most open source +// implementations of regular expressions.) For more information +// about this property, see +// +// https://swtch.com/~rsc/regexp/regexp1.html +// +// or any book about automata theory. +// +// All characters are UTF-8-encoded code points. +// Following [utf8.DecodeRune], each byte of an invalid UTF-8 sequence +// is treated as if it encoded utf8.RuneError (U+FFFD). +// +// There are 16 methods of [Regexp] that match a regular expression and identify +// the matched text. Their names are matched by this regular expression: +// +// Find(All)?(String)?(Submatch)?(Index)? +// +// If 'All' is present, the routine matches successive non-overlapping +// matches of the entire expression. Empty matches abutting a preceding +// match are ignored. The return value is a slice containing the successive +// return values of the corresponding non-'All' routine. These routines take +// an extra integer argument, n. If n >= 0, the function returns at most n +// matches/submatches; otherwise, it returns all of them. +// +// If 'String' is present, the argument is a string; otherwise it is a slice +// of bytes; return values are adjusted as appropriate. +// +// If 'Submatch' is present, the return value is a slice identifying the +// successive submatches of the expression. 
Submatches are matches of +// parenthesized subexpressions (also known as capturing groups) within the +// regular expression, numbered from left to right in order of opening +// parenthesis. Submatch 0 is the match of the entire expression, submatch 1 is +// the match of the first parenthesized subexpression, and so on. +// +// If 'Index' is present, matches and submatches are identified by byte index +// pairs within the input string: result[2*n:2*n+2] identifies the indexes of +// the nth submatch. The pair for n==0 identifies the match of the entire +// expression. If 'Index' is not present, the match is identified by the text +// of the match/submatch. If an index is negative or text is nil, it means that +// subexpression did not match any string in the input. For 'String' versions +// an empty string means either no match or an empty match. +// +// There is also a subset of the methods that can be applied to text read +// from a RuneReader: +// +// MatchReader, FindReaderIndex, FindReaderSubmatchIndex +// +// This set may grow. Note that regular expression matches may need to +// examine text beyond the text returned by a match, so the methods that +// match text from a RuneReader may read arbitrarily far into the input +// before returning. +// +// (There are a few other methods that do not match this pattern.) +package regexp + +import ( + "bytes" + "io" + "regexp/syntax" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Regexp is the representation of a compiled regular expression. +// A Regexp is safe for concurrent use by multiple goroutines, +// except for configuration methods, such as [Regexp.Longest]. +type Regexp struct { + expr string // as passed to Compile + prog *syntax.Prog // compiled program + onepass *onePassProg // onepass program or nil + numSubexp int + maxBitStateLen int + subexpNames []string + prefix string // required prefix in unanchored matches + prefixBytes []byte // prefix, as a []byte + prefixRune rune // first rune in prefix + prefixEnd uint32 // pc for last rune in prefix + mpool int // pool for machines + matchcap int // size of recorded match lengths + prefixComplete bool // prefix is the entire regexp + cond syntax.EmptyOp // empty-width conditions required at start of match + minInputLen int // minimum length of the input in bytes + + // This field can be modified by the Longest method, + // but it is otherwise read-only. + longest bool // whether regexp prefers leftmost-longest match +} + +// String returns the source text used to compile the regular expression. +func (re *Regexp) String() string { + return re.expr +} + +// Copy returns a new [Regexp] object copied from re. +// Calling [Regexp.Longest] on one copy does not affect another. +// +// Deprecated: In earlier releases, when using a [Regexp] in multiple goroutines, +// giving each goroutine its own copy helped to avoid lock contention. +// As of Go 1.12, using Copy is no longer necessary to avoid lock contention. +// Copy may still be appropriate if the reason for its use is to make +// two copies with different [Regexp.Longest] settings. +func (re *Regexp) Copy() *Regexp { + re2 := *re + return &re2 +} + +// Compile parses a regular expression and returns, if successful, +// a [Regexp] object that can be used to match against text. +// +// When matching against text, the regexp returns a match that +// begins as early as possible in the input (leftmost), and among those +// it chooses the one that a backtracking search would have found first. 
+// This so-called leftmost-first matching is the same semantics +// that Perl, Python, and other implementations use, although this +// package implements it without the expense of backtracking. +// For POSIX leftmost-longest matching, see [CompilePOSIX]. +func Compile(expr string) (*Regexp, error) { + return compile(expr, syntax.Perl, false) +} + +// CompilePOSIX is like [Compile] but restricts the regular expression +// to POSIX ERE (egrep) syntax and changes the match semantics to +// leftmost-longest. +// +// That is, when matching against text, the regexp returns a match that +// begins as early as possible in the input (leftmost), and among those +// it chooses a match that is as long as possible. +// This so-called leftmost-longest matching is the same semantics +// that early regular expression implementations used and that POSIX +// specifies. +// +// However, there can be multiple leftmost-longest matches, with different +// submatch choices, and here this package diverges from POSIX. +// Among the possible leftmost-longest matches, this package chooses +// the one that a backtracking search would have found first, while POSIX +// specifies that the match be chosen to maximize the length of the first +// subexpression, then the second, and so on from left to right. +// The POSIX rule is computationally prohibitive and not even well-defined. +// See https://swtch.com/~rsc/regexp/regexp2.html#posix for details. +func CompilePOSIX(expr string) (*Regexp, error) { + return compile(expr, syntax.POSIX, true) +} + +// Longest makes future searches prefer the leftmost-longest match. +// That is, when matching against text, the regexp returns a match that +// begins as early as possible in the input (leftmost), and among those +// it chooses a match that is as long as possible. +// This method modifies the [Regexp] and may not be called concurrently +// with any other methods. +func (re *Regexp) Longest() { + re.longest = true +} + +func compile(expr string, mode syntax.Flags, longest bool) (*Regexp, error) { + re, err := syntax.Parse(expr, mode) + if err != nil { + return nil, err + } + maxCap := re.MaxCap() + capNames := re.CapNames() + + re = re.Simplify() + prog, err := syntax.Compile(re) + if err != nil { + return nil, err + } + matchcap := prog.NumCap + if matchcap < 2 { + matchcap = 2 + } + regexp := &Regexp{ + expr: expr, + prog: prog, + onepass: compileOnePass(prog), + numSubexp: maxCap, + subexpNames: capNames, + cond: prog.StartCond(), + longest: longest, + matchcap: matchcap, + minInputLen: minInputLen(re), + } + if regexp.onepass == nil { + regexp.prefix, regexp.prefixComplete = prog.Prefix() + regexp.maxBitStateLen = maxBitStateLen(prog) + } else { + regexp.prefix, regexp.prefixComplete, regexp.prefixEnd = onePassPrefix(prog) + } + if regexp.prefix != "" { + // TODO(rsc): Remove this allocation by adding + // IndexString to package bytes. + regexp.prefixBytes = []byte(regexp.prefix) + regexp.prefixRune, _ = utf8.DecodeRuneInString(regexp.prefix) + } + + n := len(prog.Inst) + i := 0 + for matchSize[i] != 0 && matchSize[i] < n { + i++ + } + regexp.mpool = i + + return regexp, nil +} + +// Pools of *machine for use during (*Regexp).doExecute, +// split up by the size of the execution queues. +// matchPool[i] machines have queue size matchSize[i]. +// On a 64-bit system each queue entry is 16 bytes, +// so matchPool[0] has 16*2*128 = 4kB queues, etc. +// The final matchPool is a catch-all for very large queues. 
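+
+// Editor's illustration (not upstream): how a program length selects a
+// machine pool bucket; this mirrors the loop at the end of compile
+// above. For example, a 600-instruction program lands in bucket 2,
+// whose queues hold 2048 entries.
+func machinePoolIndex(progLen int) int {
+ i := 0
+ for matchSize[i] != 0 && matchSize[i] < progLen {
+ i++
+ }
+ return i
+}
+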
+var (
+ matchSize = [...]int{128, 512, 2048, 16384, 0}
+ matchPool [len(matchSize)]sync.Pool
+)
+
+// get returns a machine to use for matching re.
+// It uses the re's machine cache if possible, to avoid
+// unnecessary allocation.
+func (re *Regexp) get() *machine {
+ m, ok := matchPool[re.mpool].Get().(*machine)
+ if !ok {
+ m = new(machine)
+ }
+ m.re = re
+ m.p = re.prog
+ if cap(m.matchcap) < re.matchcap {
+ m.matchcap = make([]int, re.matchcap)
+ for _, t := range m.pool {
+ t.cap = make([]int, re.matchcap)
+ }
+ }
+
+ // Allocate queues if needed.
+ // Or reallocate, for "large" match pool.
+ n := matchSize[re.mpool]
+ if n == 0 { // large pool
+ n = len(re.prog.Inst)
+ }
+ if len(m.q0.sparse) < n {
+ m.q0 = queue{make([]uint32, n), make([]entry, 0, n)}
+ m.q1 = queue{make([]uint32, n), make([]entry, 0, n)}
+ }
+ return m
+}
+
+// put returns a machine to the correct machine pool.
+func (re *Regexp) put(m *machine) {
+ m.re = nil
+ m.p = nil
+ m.inputs.clear()
+ matchPool[re.mpool].Put(m)
+}
+
+// minInputLen walks the regexp to find the minimum length of any matchable input.
+func minInputLen(re *syntax.Regexp) int {
+ switch re.Op {
+ default:
+ return 0
+ case syntax.OpAnyChar, syntax.OpAnyCharNotNL, syntax.OpCharClass:
+ return 1
+ case syntax.OpLiteral:
+ l := 0
+ for _, r := range re.Rune {
+ if r == utf8.RuneError {
+ l++
+ } else {
+ l += utf8.RuneLen(r)
+ }
+ }
+ return l
+ case syntax.OpCapture, syntax.OpPlus:
+ return minInputLen(re.Sub[0])
+ case syntax.OpRepeat:
+ return re.Min * minInputLen(re.Sub[0])
+ case syntax.OpConcat:
+ l := 0
+ for _, sub := range re.Sub {
+ l += minInputLen(sub)
+ }
+ return l
+ case syntax.OpAlternate:
+ l := minInputLen(re.Sub[0])
+ var lnext int
+ for _, sub := range re.Sub[1:] {
+ lnext = minInputLen(sub)
+ if lnext < l {
+ l = lnext
+ }
+ }
+ return l
+ }
+}
+
+// MustCompile is like [Compile] but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled regular
+// expressions.
+func MustCompile(str string) *Regexp {
+ regexp, err := Compile(str)
+ if err != nil {
+ panic(`regexp: Compile(` + quote(str) + `): ` + err.Error())
+ }
+ return regexp
+}
+
+// MustCompilePOSIX is like [CompilePOSIX] but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled regular
+// expressions.
+func MustCompilePOSIX(str string) *Regexp {
+ regexp, err := CompilePOSIX(str)
+ if err != nil {
+ panic(`regexp: CompilePOSIX(` + quote(str) + `): ` + err.Error())
+ }
+ return regexp
+}
+
+func quote(s string) string {
+ if strconv.CanBackquote(s) {
+ return "`" + s + "`"
+ }
+ return strconv.Quote(s)
+}
+
+// NumSubexp returns the number of parenthesized subexpressions in this [Regexp].
+func (re *Regexp) NumSubexp() int {
+ return re.numSubexp
+}
+
+// SubexpNames returns the names of the parenthesized subexpressions
+// in this [Regexp]. The name for the first sub-expression is names[1],
+// so that if m is a match slice, the name for m[i] is SubexpNames()[i].
+// Since the Regexp as a whole cannot be named, names[0] is always
+// the empty string. The slice should not be modified.
+func (re *Regexp) SubexpNames() []string {
+ return re.subexpNames
+}
+
+// SubexpIndex returns the index of the first subexpression with the given name,
+// or -1 if there is no subexpression with that name.
+//
+// Note that multiple subexpressions can be written using the same name, as in
+// (?P<bob>a+)(?P<bob>b+), which declares two subexpressions named "bob".
+// In this case, SubexpIndex returns the index of the leftmost such subexpression +// in the regular expression. +func (re *Regexp) SubexpIndex(name string) int { + if name != "" { + for i, s := range re.subexpNames { + if name == s { + return i + } + } + } + return -1 +} + +const endOfText rune = -1 + +// input abstracts different representations of the input text. It provides +// one-character lookahead. +type input interface { + step(pos int) (r rune, width int) // advance one rune + canCheckPrefix() bool // can we look ahead without losing info? + hasPrefix(re *Regexp) bool + index(re *Regexp, pos int) int + context(pos int) lazyFlag +} + +// inputString scans a string. +type inputString struct { + str string +} + +func (i *inputString) step(pos int) (rune, int) { + if pos < len(i.str) { + c := i.str[pos] + if c < utf8.RuneSelf { + return rune(c), 1 + } + return utf8.DecodeRuneInString(i.str[pos:]) + } + return endOfText, 0 +} + +func (i *inputString) canCheckPrefix() bool { + return true +} + +func (i *inputString) hasPrefix(re *Regexp) bool { + return strings.HasPrefix(i.str, re.prefix) +} + +func (i *inputString) index(re *Regexp, pos int) int { + return strings.Index(i.str[pos:], re.prefix) +} + +func (i *inputString) context(pos int) lazyFlag { + r1, r2 := endOfText, endOfText + // 0 < pos && pos <= len(i.str) + if uint(pos-1) < uint(len(i.str)) { + r1 = rune(i.str[pos-1]) + if r1 >= utf8.RuneSelf { + r1, _ = utf8.DecodeLastRuneInString(i.str[:pos]) + } + } + // 0 <= pos && pos < len(i.str) + if uint(pos) < uint(len(i.str)) { + r2 = rune(i.str[pos]) + if r2 >= utf8.RuneSelf { + r2, _ = utf8.DecodeRuneInString(i.str[pos:]) + } + } + return newLazyFlag(r1, r2) +} + +// inputBytes scans a byte slice. +type inputBytes struct { + str []byte +} + +func (i *inputBytes) step(pos int) (rune, int) { + if pos < len(i.str) { + c := i.str[pos] + if c < utf8.RuneSelf { + return rune(c), 1 + } + return utf8.DecodeRune(i.str[pos:]) + } + return endOfText, 0 +} + +func (i *inputBytes) canCheckPrefix() bool { + return true +} + +func (i *inputBytes) hasPrefix(re *Regexp) bool { + return bytes.HasPrefix(i.str, re.prefixBytes) +} + +func (i *inputBytes) index(re *Regexp, pos int) int { + return bytes.Index(i.str[pos:], re.prefixBytes) +} + +func (i *inputBytes) context(pos int) lazyFlag { + r1, r2 := endOfText, endOfText + // 0 < pos && pos <= len(i.str) + if uint(pos-1) < uint(len(i.str)) { + r1 = rune(i.str[pos-1]) + if r1 >= utf8.RuneSelf { + r1, _ = utf8.DecodeLastRune(i.str[:pos]) + } + } + // 0 <= pos && pos < len(i.str) + if uint(pos) < uint(len(i.str)) { + r2 = rune(i.str[pos]) + if r2 >= utf8.RuneSelf { + r2, _ = utf8.DecodeRune(i.str[pos:]) + } + } + return newLazyFlag(r1, r2) +} + +// inputReader scans a RuneReader. +type inputReader struct { + r io.RuneReader + atEOT bool + pos int +} + +func (i *inputReader) step(pos int) (rune, int) { + if !i.atEOT && pos != i.pos { + return endOfText, 0 + + } + r, w, err := i.r.ReadRune() + if err != nil { + i.atEOT = true + return endOfText, 0 + } + i.pos += w + return r, w +} + +func (i *inputReader) canCheckPrefix() bool { + return false +} + +func (i *inputReader) hasPrefix(re *Regexp) bool { + return false +} + +func (i *inputReader) index(re *Regexp, pos int) int { + return -1 +} + +func (i *inputReader) context(pos int) lazyFlag { + return 0 // not used +} + +// LiteralPrefix returns a literal string that must begin any match +// of the regular expression re. 
It returns the boolean true if the +// literal string comprises the entire regular expression. +func (re *Regexp) LiteralPrefix() (prefix string, complete bool) { + return re.prefix, re.prefixComplete +} + +// MatchReader reports whether the text returned by the [io.RuneReader] +// contains any match of the regular expression re. +func (re *Regexp) MatchReader(r io.RuneReader) bool { + return re.doMatch(r, nil, "") +} + +// MatchString reports whether the string s +// contains any match of the regular expression re. +func (re *Regexp) MatchString(s string) bool { + return re.doMatch(nil, nil, s) +} + +// Match reports whether the byte slice b +// contains any match of the regular expression re. +func (re *Regexp) Match(b []byte) bool { + return re.doMatch(nil, b, "") +} + +// MatchReader reports whether the text returned by the RuneReader +// contains any match of the regular expression pattern. +// More complicated queries need to use [Compile] and the full [Regexp] interface. +func MatchReader(pattern string, r io.RuneReader) (matched bool, err error) { + re, err := Compile(pattern) + if err != nil { + return false, err + } + return re.MatchReader(r), nil +} + +// MatchString reports whether the string s +// contains any match of the regular expression pattern. +// More complicated queries need to use [Compile] and the full [Regexp] interface. +func MatchString(pattern string, s string) (matched bool, err error) { + re, err := Compile(pattern) + if err != nil { + return false, err + } + return re.MatchString(s), nil +} + +// Match reports whether the byte slice b +// contains any match of the regular expression pattern. +// More complicated queries need to use [Compile] and the full [Regexp] interface. +func Match(pattern string, b []byte) (matched bool, err error) { + re, err := Compile(pattern) + if err != nil { + return false, err + } + return re.Match(b), nil +} + +// ReplaceAllString returns a copy of src, replacing matches of the [Regexp] +// with the replacement string repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. +func (re *Regexp) ReplaceAllString(src, repl string) string { + n := 2 + if strings.Contains(repl, "$") { + n = 2 * (re.numSubexp + 1) + } + b := re.replaceAll(nil, src, n, func(dst []byte, match []int) []byte { + return re.expand(dst, repl, nil, src, match) + }) + return string(b) +} + +// ReplaceAllLiteralString returns a copy of src, replacing matches of the [Regexp] +// with the replacement string repl. The replacement repl is substituted directly, +// without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllLiteralString(src, repl string) string { + return string(re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { + return append(dst, repl...) + })) +} + +// ReplaceAllStringFunc returns a copy of src in which all matches of the +// [Regexp] have been replaced by the return value of function repl applied +// to the matched substring. The replacement returned by repl is substituted +// directly, without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllStringFunc(src string, repl func(string) string) string { + b := re.replaceAll(nil, src, 2, func(dst []byte, match []int) []byte { + return append(dst, repl(src[match[0]:match[1]])...) 
+ }) + return string(b) +} + +func (re *Regexp) replaceAll(bsrc []byte, src string, nmatch int, repl func(dst []byte, m []int) []byte) []byte { + lastMatchEnd := 0 // end position of the most recent match + searchPos := 0 // position where we next look for a match + var buf []byte + var endPos int + if bsrc != nil { + endPos = len(bsrc) + } else { + endPos = len(src) + } + if nmatch > re.prog.NumCap { + nmatch = re.prog.NumCap + } + + var dstCap [2]int + for searchPos <= endPos { + a := re.doExecute(nil, bsrc, src, searchPos, nmatch, dstCap[:0]) + if len(a) == 0 { + break // no more matches + } + + // Copy the unmatched characters before this match. + if bsrc != nil { + buf = append(buf, bsrc[lastMatchEnd:a[0]]...) + } else { + buf = append(buf, src[lastMatchEnd:a[0]]...) + } + + // Now insert a copy of the replacement string, but not for a + // match of the empty string immediately after another match. + // (Otherwise, we get double replacement for patterns that + // match both empty and nonempty strings.) + if a[1] > lastMatchEnd || a[0] == 0 { + buf = repl(buf, a) + } + lastMatchEnd = a[1] + + // Advance past this match; always advance at least one character. + var width int + if bsrc != nil { + _, width = utf8.DecodeRune(bsrc[searchPos:]) + } else { + _, width = utf8.DecodeRuneInString(src[searchPos:]) + } + if searchPos+width > a[1] { + searchPos += width + } else if searchPos+1 > a[1] { + // This clause is only needed at the end of the input + // string. In that case, DecodeRuneInString returns width=0. + searchPos++ + } else { + searchPos = a[1] + } + } + + // Copy the unmatched characters after the last match. + if bsrc != nil { + buf = append(buf, bsrc[lastMatchEnd:]...) + } else { + buf = append(buf, src[lastMatchEnd:]...) + } + + return buf +} + +// ReplaceAll returns a copy of src, replacing matches of the [Regexp] +// with the replacement text repl. +// Inside repl, $ signs are interpreted as in [Regexp.Expand]. +func (re *Regexp) ReplaceAll(src, repl []byte) []byte { + n := 2 + if bytes.IndexByte(repl, '$') >= 0 { + n = 2 * (re.numSubexp + 1) + } + srepl := "" + b := re.replaceAll(src, "", n, func(dst []byte, match []int) []byte { + if len(srepl) != len(repl) { + srepl = string(repl) + } + return re.expand(dst, srepl, src, "", match) + }) + return b +} + +// ReplaceAllLiteral returns a copy of src, replacing matches of the [Regexp] +// with the replacement bytes repl. The replacement repl is substituted directly, +// without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllLiteral(src, repl []byte) []byte { + return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { + return append(dst, repl...) + }) +} + +// ReplaceAllFunc returns a copy of src in which all matches of the +// [Regexp] have been replaced by the return value of function repl applied +// to the matched byte slice. The replacement returned by repl is substituted +// directly, without using [Regexp.Expand]. +func (re *Regexp) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { + return re.replaceAll(src, "", 2, func(dst []byte, match []int) []byte { + return append(dst, repl(src[match[0]:match[1]])...) + }) +} + +// Bitmap used by func special to check whether a character needs to be escaped. +var specialBytes [16]byte + +// special reports whether byte b needs to be escaped by QuoteMeta. 
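+
+// Editor's note (sketch, not upstream): specialBytes packs one bit per
+// ASCII byte, at slot b%16, bit b/16. For '.' (0x2E = 46): 46%16 is 14
+// and 46/16 is 2, so bit 2 of specialBytes[14] marks '.' as special.
+func quoteMetaExample() string {
+ return QuoteMeta(`1.5-2.0?`) // `1\.5-2\.0\?`; '-' needs no escape
+}
+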
+func special(b byte) bool { + return b < utf8.RuneSelf && specialBytes[b%16]&(1<<(b/16)) != 0 +} + +func init() { + for _, b := range []byte(`\.+*?()|[]{}^$`) { + specialBytes[b%16] |= 1 << (b / 16) + } +} + +// QuoteMeta returns a string that escapes all regular expression metacharacters +// inside the argument text; the returned string is a regular expression matching +// the literal text. +func QuoteMeta(s string) string { + // A byte loop is correct because all metacharacters are ASCII. + var i int + for i = 0; i < len(s); i++ { + if special(s[i]) { + break + } + } + // No meta characters found, so return original string. + if i >= len(s) { + return s + } + + b := make([]byte, 2*len(s)-i) + copy(b, s[:i]) + j := i + for ; i < len(s); i++ { + if special(s[i]) { + b[j] = '\\' + j++ + } + b[j] = s[i] + j++ + } + return string(b[:j]) +} + +// The number of capture values in the program may correspond +// to fewer capturing expressions than are in the regexp. +// For example, "(a){0}" turns into an empty program, so the +// maximum capture in the program is 0 but we need to return +// an expression for \1. Pad appends -1s to the slice a as needed. +func (re *Regexp) pad(a []int) []int { + if a == nil { + // No match. + return nil + } + n := (1 + re.numSubexp) * 2 + for len(a) < n { + a = append(a, -1) + } + return a +} + +// allMatches calls deliver at most n times +// with the location of successive matches in the input text. +// The input text is b if non-nil, otherwise s. +func (re *Regexp) allMatches(s string, b []byte, n int, deliver func([]int)) { + var end int + if b == nil { + end = len(s) + } else { + end = len(b) + } + + for pos, i, prevMatchEnd := 0, 0, -1; i < n && pos <= end; { + matches := re.doExecute(nil, b, s, pos, re.prog.NumCap, nil) + if len(matches) == 0 { + break + } + + accept := true + if matches[1] == pos { + // We've found an empty match. + if matches[0] == prevMatchEnd { + // We don't allow an empty match right + // after a previous match, so ignore it. + accept = false + } + var width int + if b == nil { + is := inputString{str: s} + _, width = is.step(pos) + } else { + ib := inputBytes{str: b} + _, width = ib.step(pos) + } + if width > 0 { + pos += width + } else { + pos = end + 1 + } + } else { + pos = matches[1] + } + prevMatchEnd = matches[1] + + if accept { + deliver(re.pad(matches)) + i++ + } + } +} + +// Find returns a slice holding the text of the leftmost match in b of the regular expression. +// A return value of nil indicates no match. +func (re *Regexp) Find(b []byte) []byte { + var dstCap [2]int + a := re.doExecute(nil, b, "", 0, 2, dstCap[:0]) + if a == nil { + return nil + } + return b[a[0]:a[1]:a[1]] +} + +// FindIndex returns a two-element slice of integers defining the location of +// the leftmost match in b of the regular expression. The match itself is at +// b[loc[0]:loc[1]]. +// A return value of nil indicates no match. +func (re *Regexp) FindIndex(b []byte) (loc []int) { + a := re.doExecute(nil, b, "", 0, 2, nil) + if a == nil { + return nil + } + return a[0:2] +} + +// FindString returns a string holding the text of the leftmost match in s of the regular +// expression. If there is no match, the return value is an empty string, +// but it will also be empty if the regular expression successfully matches +// an empty string. Use [Regexp.FindStringIndex] or [Regexp.FindStringSubmatch] if it is +// necessary to distinguish these cases. 
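+
+// Editor's sketch (not upstream), expanding on the note above: the
+// Index variants separate "no match" from "empty match".
+func emptyMatchVsNoMatch() (noMatch, emptyMatch []int) {
+ noMatch = MustCompile(`z`).FindStringIndex("abc")     // nil: no match
+ emptyMatch = MustCompile(`a*`).FindStringIndex("bbb") // [0 0]: empty match at 0
+ return
+}
+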
+func (re *Regexp) FindString(s string) string {
+ var dstCap [2]int
+ a := re.doExecute(nil, nil, s, 0, 2, dstCap[:0])
+ if a == nil {
+ return ""
+ }
+ return s[a[0]:a[1]]
+}
+
+// FindStringIndex returns a two-element slice of integers defining the
+// location of the leftmost match in s of the regular expression. The match
+// itself is at s[loc[0]:loc[1]].
+// A return value of nil indicates no match.
+func (re *Regexp) FindStringIndex(s string) (loc []int) {
+ a := re.doExecute(nil, nil, s, 0, 2, nil)
+ if a == nil {
+ return nil
+ }
+ return a[0:2]
+}
+
+// FindReaderIndex returns a two-element slice of integers defining the
+// location of the leftmost match of the regular expression in text read from
+// the [io.RuneReader]. The match text was found in the input stream at
+// byte offset loc[0] through loc[1]-1.
+// A return value of nil indicates no match.
+func (re *Regexp) FindReaderIndex(r io.RuneReader) (loc []int) {
+ a := re.doExecute(r, nil, "", 0, 2, nil)
+ if a == nil {
+ return nil
+ }
+ return a[0:2]
+}
+
+// FindSubmatch returns a slice of slices holding the text of the leftmost
+// match of the regular expression in b and the matches, if any, of its
+// subexpressions, as defined by the 'Submatch' descriptions in the package
+// comment.
+// A return value of nil indicates no match.
+func (re *Regexp) FindSubmatch(b []byte) [][]byte {
+ var dstCap [4]int
+ a := re.doExecute(nil, b, "", 0, re.prog.NumCap, dstCap[:0])
+ if a == nil {
+ return nil
+ }
+ ret := make([][]byte, 1+re.numSubexp)
+ for i := range ret {
+ if 2*i < len(a) && a[2*i] >= 0 {
+ ret[i] = b[a[2*i]:a[2*i+1]:a[2*i+1]]
+ }
+ }
+ return ret
+}
+
+// Expand appends template to dst and returns the result; during the
+// append, Expand replaces variables in the template with corresponding
+// matches drawn from src. The match slice should have been returned by
+// [Regexp.FindSubmatchIndex].
+//
+// In the template, a variable is denoted by a substring of the form
+// $name or ${name}, where name is a non-empty sequence of letters,
+// digits, and underscores. A purely numeric name like $1 refers to
+// the submatch with the corresponding index; other names refer to
+// capturing parentheses named with the (?P<name>...) syntax. A
+// reference to an out of range or unmatched index or a name that is not
+// present in the regular expression is replaced with an empty slice.
+//
+// In the $name form, name is taken to be as long as possible: $1x is
+// equivalent to ${1x}, not ${1}x, and, $10 is equivalent to ${10}, not ${1}0.
+//
+// To insert a literal $ in the output, use $$ in the template.
+func (re *Regexp) Expand(dst []byte, template []byte, src []byte, match []int) []byte {
+ return re.expand(dst, string(template), src, "", match)
+}
+
+// ExpandString is like [Regexp.Expand] but the template and source are strings.
+// It appends to and returns a byte slice in order to give the calling
+// code control over allocation.
+func (re *Regexp) ExpandString(dst []byte, template string, src string, match []int) []byte {
+ return re.expand(dst, template, nil, src, match)
+}
+
+func (re *Regexp) expand(dst []byte, template string, bsrc []byte, src string, match []int) []byte {
+ for len(template) > 0 {
+ before, after, ok := strings.Cut(template, "$")
+ if !ok {
+ break
+ }
+ dst = append(dst, before...)
+ template = after
+ if template != "" && template[0] == '$' {
+ // Treat $$ as $.
+ dst = append(dst, '$') + template = template[1:] + continue + } + name, num, rest, ok := extract(template) + if !ok { + // Malformed; treat $ as raw text. + dst = append(dst, '$') + continue + } + template = rest + if num >= 0 { + if 2*num+1 < len(match) && match[2*num] >= 0 { + if bsrc != nil { + dst = append(dst, bsrc[match[2*num]:match[2*num+1]]...) + } else { + dst = append(dst, src[match[2*num]:match[2*num+1]]...) + } + } + } else { + for i, namei := range re.subexpNames { + if name == namei && 2*i+1 < len(match) && match[2*i] >= 0 { + if bsrc != nil { + dst = append(dst, bsrc[match[2*i]:match[2*i+1]]...) + } else { + dst = append(dst, src[match[2*i]:match[2*i+1]]...) + } + break + } + } + } + } + dst = append(dst, template...) + return dst +} + +// extract returns the name from a leading "name" or "{name}" in str. +// (The $ has already been removed by the caller.) +// If it is a number, extract returns num set to that number; otherwise num = -1. +func extract(str string) (name string, num int, rest string, ok bool) { + if str == "" { + return + } + brace := false + if str[0] == '{' { + brace = true + str = str[1:] + } + i := 0 + for i < len(str) { + rune, size := utf8.DecodeRuneInString(str[i:]) + if !unicode.IsLetter(rune) && !unicode.IsDigit(rune) && rune != '_' { + break + } + i += size + } + if i == 0 { + // empty name is not okay + return + } + name = str[:i] + if brace { + if i >= len(str) || str[i] != '}' { + // missing closing brace + return + } + i++ + } + + // Parse number. + num = 0 + for i := 0; i < len(name); i++ { + if name[i] < '0' || '9' < name[i] || num >= 1e8 { + num = -1 + break + } + num = num*10 + int(name[i]) - '0' + } + // Disallow leading zeros. + if name[0] == '0' && len(name) > 1 { + num = -1 + } + + rest = str[i:] + ok = true + return +} + +// FindSubmatchIndex returns a slice holding the index pairs identifying the +// leftmost match of the regular expression in b and the matches, if any, of +// its subexpressions, as defined by the 'Submatch' and 'Index' descriptions +// in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindSubmatchIndex(b []byte) []int { + return re.pad(re.doExecute(nil, b, "", 0, re.prog.NumCap, nil)) +} + +// FindStringSubmatch returns a slice of strings holding the text of the +// leftmost match of the regular expression in s and the matches, if any, of +// its subexpressions, as defined by the 'Submatch' description in the +// package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindStringSubmatch(s string) []string { + var dstCap [4]int + a := re.doExecute(nil, nil, s, 0, re.prog.NumCap, dstCap[:0]) + if a == nil { + return nil + } + ret := make([]string, 1+re.numSubexp) + for i := range ret { + if 2*i < len(a) && a[2*i] >= 0 { + ret[i] = s[a[2*i]:a[2*i+1]] + } + } + return ret +} + +// FindStringSubmatchIndex returns a slice holding the index pairs +// identifying the leftmost match of the regular expression in s and the +// matches, if any, of its subexpressions, as defined by the 'Submatch' and +// 'Index' descriptions in the package comment. +// A return value of nil indicates no match. 
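+//
+// For example, matching `a(x*)b` against "-axxb-" yields []int{1, 5, 2, 4}:
+// the full match is s[1:5] ("axxb") and submatch 1 is s[2:4] ("xx").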
+func (re *Regexp) FindStringSubmatchIndex(s string) []int { + return re.pad(re.doExecute(nil, nil, s, 0, re.prog.NumCap, nil)) +} + +// FindReaderSubmatchIndex returns a slice holding the index pairs +// identifying the leftmost match of the regular expression of text read by +// the [io.RuneReader], and the matches, if any, of its subexpressions, as defined +// by the 'Submatch' and 'Index' descriptions in the package comment. A +// return value of nil indicates no match. +func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { + return re.pad(re.doExecute(r, nil, "", 0, re.prog.NumCap, nil)) +} + +const startSize = 10 // The size at which to start a slice in the 'All' routines. + +// FindAll is the 'All' version of [Regexp.Find]; it returns a slice of all successive +// matches of the expression, as defined by the 'All' description in the +// package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAll(b []byte, n int) [][]byte { + if n < 0 { + n = len(b) + 1 + } + var result [][]byte + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][]byte, 0, startSize) + } + result = append(result, b[match[0]:match[1]:match[1]]) + }) + return result +} + +// FindAllIndex is the 'All' version of [Regexp.FindIndex]; it returns a slice of all +// successive matches of the expression, as defined by the 'All' description +// in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllIndex(b []byte, n int) [][]int { + if n < 0 { + n = len(b) + 1 + } + var result [][]int + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match[0:2]) + }) + return result +} + +// FindAllString is the 'All' version of [Regexp.FindString]; it returns a slice of all +// successive matches of the expression, as defined by the 'All' description +// in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllString(s string, n int) []string { + if n < 0 { + n = len(s) + 1 + } + var result []string + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([]string, 0, startSize) + } + result = append(result, s[match[0]:match[1]]) + }) + return result +} + +// FindAllStringIndex is the 'All' version of [Regexp.FindStringIndex]; it returns a +// slice of all successive matches of the expression, as defined by the 'All' +// description in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllStringIndex(s string, n int) [][]int { + if n < 0 { + n = len(s) + 1 + } + var result [][]int + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match[0:2]) + }) + return result +} + +// FindAllSubmatch is the 'All' version of [Regexp.FindSubmatch]; it returns a slice +// of all successive matches of the expression, as defined by the 'All' +// description in the package comment. +// A return value of nil indicates no match. 
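+//
+// For example, applying `a(x*)b` to []byte("-ab-axb-") with n = -1 yields
+// two matches, [["ab" ""] ["axb" "x"]], each with the full match first.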
+func (re *Regexp) FindAllSubmatch(b []byte, n int) [][][]byte { + if n < 0 { + n = len(b) + 1 + } + var result [][][]byte + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][][]byte, 0, startSize) + } + slice := make([][]byte, len(match)/2) + for j := range slice { + if match[2*j] >= 0 { + slice[j] = b[match[2*j]:match[2*j+1]:match[2*j+1]] + } + } + result = append(result, slice) + }) + return result +} + +// FindAllSubmatchIndex is the 'All' version of [Regexp.FindSubmatchIndex]; it returns +// a slice of all successive matches of the expression, as defined by the +// 'All' description in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllSubmatchIndex(b []byte, n int) [][]int { + if n < 0 { + n = len(b) + 1 + } + var result [][]int + re.allMatches("", b, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match) + }) + return result +} + +// FindAllStringSubmatch is the 'All' version of [Regexp.FindStringSubmatch]; it +// returns a slice of all successive matches of the expression, as defined by +// the 'All' description in the package comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllStringSubmatch(s string, n int) [][]string { + if n < 0 { + n = len(s) + 1 + } + var result [][]string + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([][]string, 0, startSize) + } + slice := make([]string, len(match)/2) + for j := range slice { + if match[2*j] >= 0 { + slice[j] = s[match[2*j]:match[2*j+1]] + } + } + result = append(result, slice) + }) + return result +} + +// FindAllStringSubmatchIndex is the 'All' version of +// [Regexp.FindStringSubmatchIndex]; it returns a slice of all successive matches of +// the expression, as defined by the 'All' description in the package +// comment. +// A return value of nil indicates no match. +func (re *Regexp) FindAllStringSubmatchIndex(s string, n int) [][]int { + if n < 0 { + n = len(s) + 1 + } + var result [][]int + re.allMatches(s, nil, n, func(match []int) { + if result == nil { + result = make([][]int, 0, startSize) + } + result = append(result, match) + }) + return result +} + +// Split slices s into substrings separated by the expression and returns a slice of +// the substrings between those expression matches. +// +// The slice returned by this method consists of all the substrings of s +// not contained in the slice returned by [Regexp.FindAllString]. When called on an expression +// that contains no metacharacters, it is equivalent to [strings.SplitN]. +// +// Example: +// +// s := regexp.MustCompile("a*").Split("abaabaccadaaae", 5) +// // s: ["", "b", "b", "c", "cadaaae"] +// +// The count determines the number of substrings to return: +// +// n > 0: at most n substrings; the last substring will be the unsplit remainder. 
+// n == 0: the result is nil (zero substrings) +// n < 0: all substrings +func (re *Regexp) Split(s string, n int) []string { + + if n == 0 { + return nil + } + + if len(re.expr) > 0 && len(s) == 0 { + return []string{""} + } + + matches := re.FindAllStringIndex(s, n) + strings := make([]string, 0, len(matches)) + + beg := 0 + end := 0 + for _, match := range matches { + if n > 0 && len(strings) >= n-1 { + break + } + + end = match[0] + if match[1] != 0 { + strings = append(strings, s[beg:end]) + } + beg = match[1] + } + + if end != len(s) { + strings = append(strings, s[beg:]) + } + + return strings +} + +// MarshalText implements [encoding.TextMarshaler]. The output +// matches that of calling the [Regexp.String] method. +// +// Note that the output is lossy in some cases: This method does not indicate +// POSIX regular expressions (i.e. those compiled by calling [CompilePOSIX]), or +// those for which the [Regexp.Longest] method has been called. +func (re *Regexp) MarshalText() ([]byte, error) { + return []byte(re.String()), nil +} + +// UnmarshalText implements [encoding.TextUnmarshaler] by calling +// [Compile] on the encoded value. +func (re *Regexp) UnmarshalText(text []byte) error { + newRE, err := Compile(string(text)) + if err != nil { + return err + } + *re = *newRE + return nil +} diff --git a/vendor/github.com/grafana/regexp/syntax/compile.go b/vendor/github.com/grafana/regexp/syntax/compile.go new file mode 100644 index 00000000..c9f9fa02 --- /dev/null +++ b/vendor/github.com/grafana/regexp/syntax/compile.go @@ -0,0 +1,296 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import "unicode" + +// A patchList is a list of instruction pointers that need to be filled in (patched). +// Because the pointers haven't been filled in yet, we can reuse their storage +// to hold the list. It's kind of sleazy, but works well in practice. +// See https://swtch.com/~rsc/regexp/regexp1.html for inspiration. +// +// These aren't really pointers: they're integers, so we can reinterpret them +// this way without using package unsafe. A value l.head denotes +// p.inst[l.head>>1].Out (l.head&1==0) or .Arg (l.head&1==1). +// head == 0 denotes the empty list, okay because we start every program +// with a fail instruction, so we'll never want to point at its output link. +type patchList struct { + head, tail uint32 +} + +func makePatchList(n uint32) patchList { + return patchList{n, n} +} + +func (l patchList) patch(p *Prog, val uint32) { + head := l.head + for head != 0 { + i := &p.Inst[head>>1] + if head&1 == 0 { + head = i.Out + i.Out = val + } else { + head = i.Arg + i.Arg = val + } + } +} + +func (l1 patchList) append(p *Prog, l2 patchList) patchList { + if l1.head == 0 { + return l2 + } + if l2.head == 0 { + return l1 + } + + i := &p.Inst[l1.tail>>1] + if l1.tail&1 == 0 { + i.Out = l2.head + } else { + i.Arg = l2.head + } + return patchList{l1.head, l2.tail} +} + +// A frag represents a compiled program fragment. +type frag struct { + i uint32 // index of first instruction + out patchList // where to record end instruction + nullable bool // whether fragment can match empty string +} + +type compiler struct { + p *Prog +} + +// Compile compiles the regexp into a program to be executed. +// The regexp should have been simplified already (returned from re.Simplify). 
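+//
+// A minimal usage sketch (assuming the package's usual Parse, Simplify,
+// Compile pipeline):
+//
+//	re, err := Parse(`a+b`, Perl)
+//	if err != nil {
+//		// handle error
+//	}
+//	prog, err := Compile(re.Simplify())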
+func Compile(re *Regexp) (*Prog, error) { + var c compiler + c.init() + f := c.compile(re) + f.out.patch(c.p, c.inst(InstMatch).i) + c.p.Start = int(f.i) + return c.p, nil +} + +func (c *compiler) init() { + c.p = new(Prog) + c.p.NumCap = 2 // implicit ( and ) for whole match $0 + c.inst(InstFail) +} + +var anyRuneNotNL = []rune{0, '\n' - 1, '\n' + 1, unicode.MaxRune} +var anyRune = []rune{0, unicode.MaxRune} + +func (c *compiler) compile(re *Regexp) frag { + switch re.Op { + case OpNoMatch: + return c.fail() + case OpEmptyMatch: + return c.nop() + case OpLiteral: + if len(re.Rune) == 0 { + return c.nop() + } + var f frag + for j := range re.Rune { + f1 := c.rune(re.Rune[j:j+1], re.Flags) + if j == 0 { + f = f1 + } else { + f = c.cat(f, f1) + } + } + return f + case OpCharClass: + return c.rune(re.Rune, re.Flags) + case OpAnyCharNotNL: + return c.rune(anyRuneNotNL, 0) + case OpAnyChar: + return c.rune(anyRune, 0) + case OpBeginLine: + return c.empty(EmptyBeginLine) + case OpEndLine: + return c.empty(EmptyEndLine) + case OpBeginText: + return c.empty(EmptyBeginText) + case OpEndText: + return c.empty(EmptyEndText) + case OpWordBoundary: + return c.empty(EmptyWordBoundary) + case OpNoWordBoundary: + return c.empty(EmptyNoWordBoundary) + case OpCapture: + bra := c.cap(uint32(re.Cap << 1)) + sub := c.compile(re.Sub[0]) + ket := c.cap(uint32(re.Cap<<1 | 1)) + return c.cat(c.cat(bra, sub), ket) + case OpStar: + return c.star(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0) + case OpPlus: + return c.plus(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0) + case OpQuest: + return c.quest(c.compile(re.Sub[0]), re.Flags&NonGreedy != 0) + case OpConcat: + if len(re.Sub) == 0 { + return c.nop() + } + var f frag + for i, sub := range re.Sub { + if i == 0 { + f = c.compile(sub) + } else { + f = c.cat(f, c.compile(sub)) + } + } + return f + case OpAlternate: + var f frag + for _, sub := range re.Sub { + f = c.alt(f, c.compile(sub)) + } + return f + } + panic("regexp: unhandled case in compile") +} + +func (c *compiler) inst(op InstOp) frag { + // TODO: impose length limit + f := frag{i: uint32(len(c.p.Inst)), nullable: true} + c.p.Inst = append(c.p.Inst, Inst{Op: op}) + return f +} + +func (c *compiler) nop() frag { + f := c.inst(InstNop) + f.out = makePatchList(f.i << 1) + return f +} + +func (c *compiler) fail() frag { + return frag{} +} + +func (c *compiler) cap(arg uint32) frag { + f := c.inst(InstCapture) + f.out = makePatchList(f.i << 1) + c.p.Inst[f.i].Arg = arg + + if c.p.NumCap < int(arg)+1 { + c.p.NumCap = int(arg) + 1 + } + return f +} + +func (c *compiler) cat(f1, f2 frag) frag { + // concat of failure is failure + if f1.i == 0 || f2.i == 0 { + return frag{} + } + + // TODO: elide nop + + f1.out.patch(c.p, f2.i) + return frag{f1.i, f2.out, f1.nullable && f2.nullable} +} + +func (c *compiler) alt(f1, f2 frag) frag { + // alt of failure is other + if f1.i == 0 { + return f2 + } + if f2.i == 0 { + return f1 + } + + f := c.inst(InstAlt) + i := &c.p.Inst[f.i] + i.Out = f1.i + i.Arg = f2.i + f.out = f1.out.append(c.p, f2.out) + f.nullable = f1.nullable || f2.nullable + return f +} + +func (c *compiler) quest(f1 frag, nongreedy bool) frag { + f := c.inst(InstAlt) + i := &c.p.Inst[f.i] + if nongreedy { + i.Arg = f1.i + f.out = makePatchList(f.i << 1) + } else { + i.Out = f1.i + f.out = makePatchList(f.i<<1 | 1) + } + f.out = f.out.append(c.p, f1.out) + return f +} + +// loop returns the fragment for the main loop of a plus or star. +// For plus, it can be used after changing the entry to f1.i. 
+// For star, it can be used directly when f1 can't match an empty string. +// (When f1 can match an empty string, f1* must be implemented as (f1+)? +// to get the priority match order correct.) +func (c *compiler) loop(f1 frag, nongreedy bool) frag { + f := c.inst(InstAlt) + i := &c.p.Inst[f.i] + if nongreedy { + i.Arg = f1.i + f.out = makePatchList(f.i << 1) + } else { + i.Out = f1.i + f.out = makePatchList(f.i<<1 | 1) + } + f1.out.patch(c.p, f.i) + return f +} + +func (c *compiler) star(f1 frag, nongreedy bool) frag { + if f1.nullable { + // Use (f1+)? to get priority match order correct. + // See golang.org/issue/46123. + return c.quest(c.plus(f1, nongreedy), nongreedy) + } + return c.loop(f1, nongreedy) +} + +func (c *compiler) plus(f1 frag, nongreedy bool) frag { + return frag{f1.i, c.loop(f1, nongreedy).out, f1.nullable} +} + +func (c *compiler) empty(op EmptyOp) frag { + f := c.inst(InstEmptyWidth) + c.p.Inst[f.i].Arg = uint32(op) + f.out = makePatchList(f.i << 1) + return f +} + +func (c *compiler) rune(r []rune, flags Flags) frag { + f := c.inst(InstRune) + f.nullable = false + i := &c.p.Inst[f.i] + i.Rune = r + flags &= FoldCase // only relevant flag is FoldCase + if len(r) != 1 || unicode.SimpleFold(r[0]) == r[0] { + // and sometimes not even that + flags &^= FoldCase + } + i.Arg = uint32(flags) + f.out = makePatchList(f.i << 1) + + // Special cases for exec machine. + switch { + case flags&FoldCase == 0 && (len(r) == 1 || len(r) == 2 && r[0] == r[1]): + i.Op = InstRune1 + case len(r) == 2 && r[0] == 0 && r[1] == unicode.MaxRune: + i.Op = InstRuneAny + case len(r) == 4 && r[0] == 0 && r[1] == '\n'-1 && r[2] == '\n'+1 && r[3] == unicode.MaxRune: + i.Op = InstRuneAnyNotNL + } + + return f +} diff --git a/vendor/github.com/grafana/regexp/syntax/doc.go b/vendor/github.com/grafana/regexp/syntax/doc.go new file mode 100644 index 00000000..877f1043 --- /dev/null +++ b/vendor/github.com/grafana/regexp/syntax/doc.go @@ -0,0 +1,142 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by mksyntaxgo from the RE2 distribution. DO NOT EDIT. + +/* +Package syntax parses regular expressions into parse trees and compiles +parse trees into programs. Most clients of regular expressions will use the +facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package. + +# Syntax + +The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows. +Parts of the syntax can be disabled by passing alternate flags to [Parse]. + +Single characters: + + . any character, possibly including newline (flag s=true) + [xyz] character class + [^xyz] negated character class + \d Perl character class + \D negated Perl character class + [[:alpha:]] ASCII character class + [[:^alpha:]] negated ASCII character class + \pN Unicode character class (one-letter name) + \p{Greek} Unicode character class + \PN negated Unicode character class (one-letter name) + \P{Greek} negated Unicode character class + +Composites: + + xy x followed by y + x|y x or y (prefer x) + +Repetitions: + + x* zero or more x, prefer more + x+ one or more x, prefer more + x? zero or one x, prefer one + x{n,m} n or n+1 or ... or m x, prefer more + x{n,} n or more x, prefer more + x{n} exactly n x + x*? zero or more x, prefer fewer + x+? one or more x, prefer fewer + x?? zero or one x, prefer zero + x{n,m}? n or n+1 or ... 
or m x, prefer fewer
+	x{n,}?         n or more x, prefer fewer
+	x{n}?          exactly n x
+
+Implementation restriction: The counting forms x{n,m}, x{n,}, and x{n}
+reject forms that create a minimum or maximum repetition count above 1000.
+Unlimited repetitions are not subject to this restriction.
+
+Grouping:
+
+	(re)           numbered capturing group (submatch)
+	(?P<name>re)   named & numbered capturing group (submatch)
+	(?<name>re)    named & numbered capturing group (submatch)
+	(?:re)         non-capturing group
+	(?flags)       set flags within current group; non-capturing
+	(?flags:re)    set flags during re; non-capturing
+
+	Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). The flags are:
+
+	i              case-insensitive (default false)
+	m              multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false)
+	s              let . match \n (default false)
+	U              ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false)
+
+Empty strings:
+
+	^              at beginning of text or line (flag m=true)
+	$              at end of text (like \z not \Z) or line (flag m=true)
+	\A             at beginning of text
+	\b             at ASCII word boundary (\w on one side and \W, \A, or \z on the other)
+	\B             not at ASCII word boundary
+	\z             at end of text
+
+Escape sequences:
+
+	\a             bell (== \007)
+	\f             form feed (== \014)
+	\t             horizontal tab (== \011)
+	\n             newline (== \012)
+	\r             carriage return (== \015)
+	\v             vertical tab character (== \013)
+	\*             literal *, for any punctuation character *
+	\123           octal character code (up to three digits)
+	\x7F           hex character code (exactly two digits)
+	\x{10FFFF}     hex character code
+	\Q...\E        literal text ... even if ... has punctuation
+
+Character class elements:
+
+	x              single character
+	A-Z            character range (inclusive)
+	\d             Perl character class
+	[:foo:]        ASCII character class foo
+	\p{Foo}        Unicode character class Foo
+	\pF            Unicode character class F (one-letter name)
+
+Named character classes as character class elements:
+
+	[\d]           digits (== \d)
+	[^\d]          not digits (== \D)
+	[\D]           not digits (== \D)
+	[^\D]          not not digits (== \d)
+	[[:name:]]     named ASCII class inside character class (== [:name:])
+	[^[:name:]]    named ASCII class inside negated character class (== [:^name:])
+	[\p{Name}]     named Unicode property inside character class (== \p{Name})
+	[^\p{Name}]    named Unicode property inside negated character class (== \P{Name})
+
+Perl character classes (all ASCII-only):
+
+	\d             digits (== [0-9])
+	\D             not digits (== [^0-9])
+	\s             whitespace (== [\t\n\f\r ])
+	\S             not whitespace (== [^\t\n\f\r ])
+	\w             word characters (== [0-9A-Za-z_])
+	\W             not word characters (== [^0-9A-Za-z_])
+
+ASCII character classes:
+
+	[[:alnum:]]    alphanumeric (== [0-9A-Za-z])
+	[[:alpha:]]    alphabetic (== [A-Za-z])
+	[[:ascii:]]    ASCII (== [\x00-\x7F])
+	[[:blank:]]    blank (== [\t ])
+	[[:cntrl:]]    control (== [\x00-\x1F\x7F])
+	[[:digit:]]    digits (== [0-9])
+	[[:graph:]]    graphical (== [!-~] == [A-Za-z0-9!"#$%&'()*+,\-./:;<=>?@[\\\]^_`{|}~])
+	[[:lower:]]    lower case (== [a-z])
+	[[:print:]]    printable (== [ -~] == [ [:graph:]])
+	[[:punct:]]    punctuation (== [!-/:-@[-`{-~])
+	[[:space:]]    whitespace (== [\t\n\v\f\r ])
+	[[:upper:]]    upper case (== [A-Z])
+	[[:word:]]     word characters (== [0-9A-Za-z_])
+	[[:xdigit:]]   hex digit (== [0-9A-Fa-f])
+
+Unicode character classes are those in [unicode.Categories] and [unicode.Scripts].
+*/
+package syntax
diff --git a/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl b/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl
new file mode 100644
index 00000000..80a2c9ae
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/make_perl_groups.pl
@@ -0,0 +1,113 @@
+#!/usr/bin/perl
+# Copyright 2008 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# Modified version of RE2's make_perl_groups.pl.
+
+# Generate table entries giving character ranges
+# for POSIX/Perl character classes. Rather than
+# figure out what the definition is, it is easier to ask
+# Perl about each letter from 0-128 and write down
+# its answer.
+
+@posixclasses = (
+	"[:alnum:]",
+	"[:alpha:]",
+	"[:ascii:]",
+	"[:blank:]",
+	"[:cntrl:]",
+	"[:digit:]",
+	"[:graph:]",
+	"[:lower:]",
+	"[:print:]",
+	"[:punct:]",
+	"[:space:]",
+	"[:upper:]",
+	"[:word:]",
+	"[:xdigit:]",
+);
+
+@perlclasses = (
+	"\\d",
+	"\\s",
+	"\\w",
+);
+
+%overrides = (
+	# Prior to Perl 5.18, \s did not match vertical tab.
+	# RE2 preserves that original behaviour.
+	"\\s:11" => 0,
+);
+
+sub ComputeClass($) {
+  my @ranges;
+  my ($class) = @_;
+  my $regexp = "[$class]";
+  my $start = -1;
+  for (my $i=0; $i<=129; $i++) {
+    if ($i == 129) { $i = 256; }
+    if ($i <= 128 && ($overrides{"$class:$i"} // chr($i) =~ $regexp)) {
+      if ($start < 0) {
+        $start = $i;
+      }
+    } else {
+      if ($start >= 0) {
+        push @ranges, [$start, $i-1];
+      }
+      $start = -1;
+    }
+  }
+  return @ranges;
+}
+
+sub PrintClass($$@) {
+  my ($cname, $name, @ranges) = @_;
+  print "var code$cname = []rune{ /* $name */\n";
+  for (my $i=0; $i<@ranges; $i++) {
+    my @a = @{$ranges[$i]};
+    printf "\t0x%x, 0x%x,\n", $a[0], $a[1];
+  }
+  print "}\n\n";
+  my $n = @ranges;
+  $negname = $name;
+  if ($negname =~ /:/) {
+    $negname =~ s/:/:^/;
+  } else {
+    $negname =~ y/a-z/A-Z/;
+  }
+  return "\t`$name`: {+1, code$cname},\n" .
+    "\t`$negname`: {-1, code$cname},\n";
+}
+
+my $gen = 0;
+
+sub PrintClasses($@) {
+  my ($cname, @classes) = @_;
+  my @entries;
+  foreach my $cl (@classes) {
+    my @ranges = ComputeClass($cl);
+    push @entries, PrintClass(++$gen, $cl, @ranges);
+  }
+  print "var ${cname}Group = map[string]charGroup{\n";
+  foreach my $e (@entries) {
+    print $e;
+  }
+  print "}\n";
+  my $count = @entries;
+}
+
+print <<EOF;
+// Code generated by make_perl_groups.pl; DO NOT EDIT.
+// make_perl_groups.pl >perl_groups.go
+
+package syntax
+
+EOF
+
+PrintClasses("perl", @perlclasses);
+PrintClasses("posix", @posixclasses);
diff --git a/vendor/github.com/grafana/regexp/syntax/op_string.go b/vendor/github.com/grafana/regexp/syntax/op_string.go
new file mode 100644
index 00000000..1368f5b7
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/op_string.go
@@ -0,0 +1,52 @@
+// Code generated by "stringer -type Op -trimprefix Op"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[OpNoMatch-1] + _ = x[OpEmptyMatch-2] + _ = x[OpLiteral-3] + _ = x[OpCharClass-4] + _ = x[OpAnyCharNotNL-5] + _ = x[OpAnyChar-6] + _ = x[OpBeginLine-7] + _ = x[OpEndLine-8] + _ = x[OpBeginText-9] + _ = x[OpEndText-10] + _ = x[OpWordBoundary-11] + _ = x[OpNoWordBoundary-12] + _ = x[OpCapture-13] + _ = x[OpStar-14] + _ = x[OpPlus-15] + _ = x[OpQuest-16] + _ = x[OpRepeat-17] + _ = x[OpConcat-18] + _ = x[OpAlternate-19] + _ = x[opPseudo-128] +} + +const ( + _Op_name_0 = "NoMatchEmptyMatchLiteralCharClassAnyCharNotNLAnyCharBeginLineEndLineBeginTextEndTextWordBoundaryNoWordBoundaryCaptureStarPlusQuestRepeatConcatAlternate" + _Op_name_1 = "opPseudo" +) + +var ( + _Op_index_0 = [...]uint8{0, 7, 17, 24, 33, 45, 52, 61, 68, 77, 84, 96, 110, 117, 121, 125, 130, 136, 142, 151} +) + +func (i Op) String() string { + switch { + case 1 <= i && i <= 19: + i -= 1 + return _Op_name_0[_Op_index_0[i]:_Op_index_0[i+1]] + case i == 128: + return _Op_name_1 + default: + return "Op(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/grafana/regexp/syntax/parse.go b/vendor/github.com/grafana/regexp/syntax/parse.go new file mode 100644 index 00000000..6ed6491c --- /dev/null +++ b/vendor/github.com/grafana/regexp/syntax/parse.go @@ -0,0 +1,2136 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import ( + "sort" + "strings" + "unicode" + "unicode/utf8" +) + +// An Error describes a failure to parse a regular expression +// and gives the offending expression. +type Error struct { + Code ErrorCode + Expr string +} + +func (e *Error) Error() string { + return "error parsing regexp: " + e.Code.String() + ": `" + e.Expr + "`" +} + +// An ErrorCode describes a failure to parse a regular expression. +type ErrorCode string + +const ( + // Unexpected error + ErrInternalError ErrorCode = "regexp/syntax: internal error" + + // Parse errors + ErrInvalidCharClass ErrorCode = "invalid character class" + ErrInvalidCharRange ErrorCode = "invalid character class range" + ErrInvalidEscape ErrorCode = "invalid escape sequence" + ErrInvalidNamedCapture ErrorCode = "invalid named capture" + ErrInvalidPerlOp ErrorCode = "invalid or unsupported Perl syntax" + ErrInvalidRepeatOp ErrorCode = "invalid nested repetition operator" + ErrInvalidRepeatSize ErrorCode = "invalid repeat count" + ErrInvalidUTF8 ErrorCode = "invalid UTF-8" + ErrMissingBracket ErrorCode = "missing closing ]" + ErrMissingParen ErrorCode = "missing closing )" + ErrMissingRepeatArgument ErrorCode = "missing argument to repetition operator" + ErrTrailingBackslash ErrorCode = "trailing backslash at end of expression" + ErrUnexpectedParen ErrorCode = "unexpected )" + ErrNestingDepth ErrorCode = "expression nests too deeply" + ErrLarge ErrorCode = "expression too large" +) + +func (e ErrorCode) String() string { + return string(e) +} + +// Flags control the behavior of the parser and record information about regexp context. +type Flags uint16 + +const ( + FoldCase Flags = 1 << iota // case-insensitive match + Literal // treat pattern as literal string + ClassNL // allow character classes like [^a-z] and [[:space:]] to match newline + DotNL // allow . 
to match newline
+	OneLine                         // treat ^ and $ as only matching at beginning and end of text
+	NonGreedy                       // make repetition operators default to non-greedy
+	PerlX                           // allow Perl extensions
+	UnicodeGroups                   // allow \p{Han}, \P{Han} for Unicode group and negation
+	WasDollar                       // regexp OpEndText was $, not \z
+	Simple                          // regexp contains no counted repetition
+
+	MatchNL = ClassNL | DotNL
+
+	Perl = ClassNL | OneLine | PerlX | UnicodeGroups // as close to Perl as possible
+	POSIX Flags = 0                                  // POSIX syntax
+)
+
+// Pseudo-ops for parsing stack.
+const (
+	opLeftParen = opPseudo + iota
+	opVerticalBar
+)
+
+// maxHeight is the maximum height of a regexp parse tree.
+// It is somewhat arbitrarily chosen, but the idea is to be large enough
+// that no one will actually hit in real use but at the same time small enough
+// that recursion on the Regexp tree will not hit the 1GB Go stack limit.
+// The maximum amount of stack for a single recursive frame is probably
+// closer to 1kB, so this could potentially be raised, but it seems unlikely
+// that people have regexps nested even this deeply.
+// We ran a test on Google's C++ code base and turned up only
+// a single use case with depth > 100; it had depth 128.
+// Using depth 1000 should be plenty of margin.
+// As an optimization, we don't even bother calculating heights
+// until we've allocated at least maxHeight Regexp structures.
+const maxHeight = 1000
+
+// maxSize is the maximum size of a compiled regexp in Insts.
+// It too is somewhat arbitrarily chosen, but the idea is to be large enough
+// to allow significant regexps while at the same time small enough that
+// the compiled form will not take up too much memory.
+// 128 MB is enough for 3.3 million Inst structures, which roughly
+// corresponds to a 3.3 MB regexp.
+const (
+	maxSize  = 128 << 20 / instSize
+	instSize = 5 * 8 // byte, 2 uint32, slice is 5 64-bit words
+)
+
+// maxRunes is the maximum number of runes allowed in a regexp tree
+// counting the runes in all the nodes.
+// Ignoring character classes, p.numRunes is always less than the length of the regexp.
+// Character classes can make it much larger: each \pL adds 1292 runes.
+// 128 MB is enough for 32M runes, which is over 26k \pL instances.
+// Note that repetitions do not make copies of the rune slices,
+// so \pL{1000} is only one rune slice, not 1000.
+// We could keep a cache of character classes we've seen,
+// so that all the \pL we see use the same rune list,
+// but that doesn't remove the problem entirely:
+// consider something like [\pL01234][\pL01235][\pL01236]...[\pL^&*()].
+// And because the Rune slice is exposed directly in the Regexp,
+// there is not an opportunity to change the representation to allow
+// partial sharing between different character classes.
+// So the limit is the best we can do.
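+// (Arithmetic check: 128 << 20 bytes / 4 bytes per rune = 32M runes,
+// and 32M / 1292 ≈ 26k \pL instances, matching the figures above.)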
+const ( + maxRunes = 128 << 20 / runeSize + runeSize = 4 // rune is int32 +) + +type parser struct { + flags Flags // parse mode flags + stack []*Regexp // stack of parsed expressions + free *Regexp + numCap int // number of capturing groups seen + wholeRegexp string + tmpClass []rune // temporary char class work space + numRegexp int // number of regexps allocated + numRunes int // number of runes in char classes + repeats int64 // product of all repetitions seen + height map[*Regexp]int // regexp height, for height limit check + size map[*Regexp]int64 // regexp compiled size, for size limit check +} + +func (p *parser) newRegexp(op Op) *Regexp { + re := p.free + if re != nil { + p.free = re.Sub0[0] + *re = Regexp{} + } else { + re = new(Regexp) + p.numRegexp++ + } + re.Op = op + return re +} + +func (p *parser) reuse(re *Regexp) { + if p.height != nil { + delete(p.height, re) + } + re.Sub0[0] = p.free + p.free = re +} + +func (p *parser) checkLimits(re *Regexp) { + if p.numRunes > maxRunes { + panic(ErrLarge) + } + p.checkSize(re) + p.checkHeight(re) +} + +func (p *parser) checkSize(re *Regexp) { + if p.size == nil { + // We haven't started tracking size yet. + // Do a relatively cheap check to see if we need to start. + // Maintain the product of all the repeats we've seen + // and don't track if the total number of regexp nodes + // we've seen times the repeat product is in budget. + if p.repeats == 0 { + p.repeats = 1 + } + if re.Op == OpRepeat { + n := re.Max + if n == -1 { + n = re.Min + } + if n <= 0 { + n = 1 + } + if int64(n) > maxSize/p.repeats { + p.repeats = maxSize + } else { + p.repeats *= int64(n) + } + } + if int64(p.numRegexp) < maxSize/p.repeats { + return + } + + // We need to start tracking size. + // Make the map and belatedly populate it + // with info about everything we've constructed so far. + p.size = make(map[*Regexp]int64) + for _, re := range p.stack { + p.checkSize(re) + } + } + + if p.calcSize(re, true) > maxSize { + panic(ErrLarge) + } +} + +func (p *parser) calcSize(re *Regexp, force bool) int64 { + if !force { + if size, ok := p.size[re]; ok { + return size + } + } + + var size int64 + switch re.Op { + case OpLiteral: + size = int64(len(re.Rune)) + case OpCapture, OpStar: + // star can be 1+ or 2+; assume 2 pessimistically + size = 2 + p.calcSize(re.Sub[0], false) + case OpPlus, OpQuest: + size = 1 + p.calcSize(re.Sub[0], false) + case OpConcat: + for _, sub := range re.Sub { + size += p.calcSize(sub, false) + } + case OpAlternate: + for _, sub := range re.Sub { + size += p.calcSize(sub, false) + } + if len(re.Sub) > 1 { + size += int64(len(re.Sub)) - 1 + } + case OpRepeat: + sub := p.calcSize(re.Sub[0], false) + if re.Max == -1 { + if re.Min == 0 { + size = 2 + sub // x* + } else { + size = 1 + int64(re.Min)*sub // xxx+ + } + break + } + // x{2,5} = xx(x(x(x)?)?)? 
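+		// (Max copies of sub plus one alternation node per optional
+		// copy, hence size = Max*sub + (Max-Min).)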
+ size = int64(re.Max)*sub + int64(re.Max-re.Min) + } + + size = max(1, size) + p.size[re] = size + return size +} + +func (p *parser) checkHeight(re *Regexp) { + if p.numRegexp < maxHeight { + return + } + if p.height == nil { + p.height = make(map[*Regexp]int) + for _, re := range p.stack { + p.checkHeight(re) + } + } + if p.calcHeight(re, true) > maxHeight { + panic(ErrNestingDepth) + } +} + +func (p *parser) calcHeight(re *Regexp, force bool) int { + if !force { + if h, ok := p.height[re]; ok { + return h + } + } + h := 1 + for _, sub := range re.Sub { + hsub := p.calcHeight(sub, false) + if h < 1+hsub { + h = 1 + hsub + } + } + p.height[re] = h + return h +} + +// Parse stack manipulation. + +// push pushes the regexp re onto the parse stack and returns the regexp. +func (p *parser) push(re *Regexp) *Regexp { + p.numRunes += len(re.Rune) + if re.Op == OpCharClass && len(re.Rune) == 2 && re.Rune[0] == re.Rune[1] { + // Single rune. + if p.maybeConcat(re.Rune[0], p.flags&^FoldCase) { + return nil + } + re.Op = OpLiteral + re.Rune = re.Rune[:1] + re.Flags = p.flags &^ FoldCase + } else if re.Op == OpCharClass && len(re.Rune) == 4 && + re.Rune[0] == re.Rune[1] && re.Rune[2] == re.Rune[3] && + unicode.SimpleFold(re.Rune[0]) == re.Rune[2] && + unicode.SimpleFold(re.Rune[2]) == re.Rune[0] || + re.Op == OpCharClass && len(re.Rune) == 2 && + re.Rune[0]+1 == re.Rune[1] && + unicode.SimpleFold(re.Rune[0]) == re.Rune[1] && + unicode.SimpleFold(re.Rune[1]) == re.Rune[0] { + // Case-insensitive rune like [Aa] or [Δδ]. + if p.maybeConcat(re.Rune[0], p.flags|FoldCase) { + return nil + } + + // Rewrite as (case-insensitive) literal. + re.Op = OpLiteral + re.Rune = re.Rune[:1] + re.Flags = p.flags | FoldCase + } else { + // Incremental concatenation. + p.maybeConcat(-1, 0) + } + + p.stack = append(p.stack, re) + p.checkLimits(re) + return re +} + +// maybeConcat implements incremental concatenation +// of literal runes into string nodes. The parser calls this +// before each push, so only the top fragment of the stack +// might need processing. Since this is called before a push, +// the topmost literal is no longer subject to operators like * +// (Otherwise ab* would turn into (ab)*.) +// If r >= 0 and there's a node left over, maybeConcat uses it +// to push r with the given flags. +// maybeConcat reports whether r was pushed. +func (p *parser) maybeConcat(r rune, flags Flags) bool { + n := len(p.stack) + if n < 2 { + return false + } + + re1 := p.stack[n-1] + re2 := p.stack[n-2] + if re1.Op != OpLiteral || re2.Op != OpLiteral || re1.Flags&FoldCase != re2.Flags&FoldCase { + return false + } + + // Push re1 into re2. + re2.Rune = append(re2.Rune, re1.Rune...) + + // Reuse re1 if possible. + if r >= 0 { + re1.Rune = re1.Rune0[:1] + re1.Rune[0] = r + re1.Flags = flags + return true + } + + p.stack = p.stack[:n-1] + p.reuse(re1) + return false // did not push r +} + +// literal pushes a literal regexp for the rune r on the stack. +func (p *parser) literal(r rune) { + re := p.newRegexp(OpLiteral) + re.Flags = p.flags + if p.flags&FoldCase != 0 { + r = minFoldRune(r) + } + re.Rune0[0] = r + re.Rune = re.Rune0[:1] + p.push(re) +} + +// minFoldRune returns the minimum rune fold-equivalent to r. +func minFoldRune(r rune) rune { + if r < minFold || r > maxFold { + return r + } + m := r + r0 := r + for r = unicode.SimpleFold(r); r != r0; r = unicode.SimpleFold(r) { + m = min(m, r) + } + return m +} + +// op pushes a regexp with the given op onto the stack +// and returns that regexp. 
+func (p *parser) op(op Op) *Regexp { + re := p.newRegexp(op) + re.Flags = p.flags + return p.push(re) +} + +// repeat replaces the top stack element with itself repeated according to op, min, max. +// before is the regexp suffix starting at the repetition operator. +// after is the regexp suffix following after the repetition operator. +// repeat returns an updated 'after' and an error, if any. +func (p *parser) repeat(op Op, min, max int, before, after, lastRepeat string) (string, error) { + flags := p.flags + if p.flags&PerlX != 0 { + if len(after) > 0 && after[0] == '?' { + after = after[1:] + flags ^= NonGreedy + } + if lastRepeat != "" { + // In Perl it is not allowed to stack repetition operators: + // a** is a syntax error, not a doubled star, and a++ means + // something else entirely, which we don't support! + return "", &Error{ErrInvalidRepeatOp, lastRepeat[:len(lastRepeat)-len(after)]} + } + } + n := len(p.stack) + if n == 0 { + return "", &Error{ErrMissingRepeatArgument, before[:len(before)-len(after)]} + } + sub := p.stack[n-1] + if sub.Op >= opPseudo { + return "", &Error{ErrMissingRepeatArgument, before[:len(before)-len(after)]} + } + + re := p.newRegexp(op) + re.Min = min + re.Max = max + re.Flags = flags + re.Sub = re.Sub0[:1] + re.Sub[0] = sub + p.stack[n-1] = re + p.checkLimits(re) + + if op == OpRepeat && (min >= 2 || max >= 2) && !repeatIsValid(re, 1000) { + return "", &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]} + } + + return after, nil +} + +// repeatIsValid reports whether the repetition re is valid. +// Valid means that the combination of the top-level repetition +// and any inner repetitions does not exceed n copies of the +// innermost thing. +// This function rewalks the regexp tree and is called for every repetition, +// so we have to worry about inducing quadratic behavior in the parser. +// We avoid this by only calling repeatIsValid when min or max >= 2. +// In that case the depth of any >= 2 nesting can only get to 9 without +// triggering a parse error, so each subtree can only be rewalked 9 times. +func repeatIsValid(re *Regexp, n int) bool { + if re.Op == OpRepeat { + m := re.Max + if m == 0 { + return true + } + if m < 0 { + m = re.Min + } + if m > n { + return false + } + if m > 0 { + n /= m + } + } + for _, sub := range re.Sub { + if !repeatIsValid(sub, n) { + return false + } + } + return true +} + +// concat replaces the top of the stack (above the topmost '|' or '(') with its concatenation. +func (p *parser) concat() *Regexp { + p.maybeConcat(-1, 0) + + // Scan down to find pseudo-operator | or (. + i := len(p.stack) + for i > 0 && p.stack[i-1].Op < opPseudo { + i-- + } + subs := p.stack[i:] + p.stack = p.stack[:i] + + // Empty concatenation is special case. + if len(subs) == 0 { + return p.push(p.newRegexp(OpEmptyMatch)) + } + + return p.push(p.collapse(subs, OpConcat)) +} + +// alternate replaces the top of the stack (above the topmost '(') with its alternation. +func (p *parser) alternate() *Regexp { + // Scan down to find pseudo-operator (. + // There are no | above (. + i := len(p.stack) + for i > 0 && p.stack[i-1].Op < opPseudo { + i-- + } + subs := p.stack[i:] + p.stack = p.stack[:i] + + // Make sure top class is clean. + // All the others already are (see swapVerticalBar). + if len(subs) > 0 { + cleanAlt(subs[len(subs)-1]) + } + + // Empty alternate is special case + // (shouldn't happen but easy to handle). 
+ if len(subs) == 0 { + return p.push(p.newRegexp(OpNoMatch)) + } + + return p.push(p.collapse(subs, OpAlternate)) +} + +// cleanAlt cleans re for eventual inclusion in an alternation. +func cleanAlt(re *Regexp) { + switch re.Op { + case OpCharClass: + re.Rune = cleanClass(&re.Rune) + if len(re.Rune) == 2 && re.Rune[0] == 0 && re.Rune[1] == unicode.MaxRune { + re.Rune = nil + re.Op = OpAnyChar + return + } + if len(re.Rune) == 4 && re.Rune[0] == 0 && re.Rune[1] == '\n'-1 && re.Rune[2] == '\n'+1 && re.Rune[3] == unicode.MaxRune { + re.Rune = nil + re.Op = OpAnyCharNotNL + return + } + if cap(re.Rune)-len(re.Rune) > 100 { + // re.Rune will not grow any more. + // Make a copy or inline to reclaim storage. + re.Rune = append(re.Rune0[:0], re.Rune...) + } + } +} + +// collapse returns the result of applying op to sub. +// If sub contains op nodes, they all get hoisted up +// so that there is never a concat of a concat or an +// alternate of an alternate. +func (p *parser) collapse(subs []*Regexp, op Op) *Regexp { + if len(subs) == 1 { + return subs[0] + } + re := p.newRegexp(op) + re.Sub = re.Sub0[:0] + for _, sub := range subs { + if sub.Op == op { + re.Sub = append(re.Sub, sub.Sub...) + p.reuse(sub) + } else { + re.Sub = append(re.Sub, sub) + } + } + if op == OpAlternate { + re.Sub = p.factor(re.Sub) + if len(re.Sub) == 1 { + old := re + re = re.Sub[0] + p.reuse(old) + } + } + return re +} + +// factor factors common prefixes from the alternation list sub. +// It returns a replacement list that reuses the same storage and +// frees (passes to p.reuse) any removed *Regexps. +// +// For example, +// +// ABC|ABD|AEF|BCX|BCY +// +// simplifies by literal prefix extraction to +// +// A(B(C|D)|EF)|BC(X|Y) +// +// which simplifies by character class introduction to +// +// A(B[CD]|EF)|BC[XY] +func (p *parser) factor(sub []*Regexp) []*Regexp { + if len(sub) < 2 { + return sub + } + + // Round 1: Factor out common literal prefixes. + var str []rune + var strflags Flags + start := 0 + out := sub[:0] + for i := 0; i <= len(sub); i++ { + // Invariant: the Regexps that were in sub[0:start] have been + // used or marked for reuse, and the slice space has been reused + // for out (len(out) <= start). + // + // Invariant: sub[start:i] consists of regexps that all begin + // with str as modified by strflags. + var istr []rune + var iflags Flags + if i < len(sub) { + istr, iflags = p.leadingString(sub[i]) + if iflags == strflags { + same := 0 + for same < len(str) && same < len(istr) && str[same] == istr[same] { + same++ + } + if same > 0 { + // Matches at least one rune in current range. + // Keep going around. + str = str[:same] + continue + } + } + } + + // Found end of a run with common leading literal string: + // sub[start:i] all begin with str[0:len(str)], but sub[i] + // does not even begin with str[0]. + // + // Factor out common string and append factored expression to out. + if i == start { + // Nothing to do - run of length 0. + } else if i == start+1 { + // Just one: don't bother factoring. + out = append(out, sub[start]) + } else { + // Construct factored form: prefix(suffix1|suffix2|...) + prefix := p.newRegexp(OpLiteral) + prefix.Flags = strflags + prefix.Rune = append(prefix.Rune[:0], str...) 
+ + for j := start; j < i; j++ { + sub[j] = p.removeLeadingString(sub[j], len(str)) + p.checkLimits(sub[j]) + } + suffix := p.collapse(sub[start:i], OpAlternate) // recurse + + re := p.newRegexp(OpConcat) + re.Sub = append(re.Sub[:0], prefix, suffix) + out = append(out, re) + } + + // Prepare for next iteration. + start = i + str = istr + strflags = iflags + } + sub = out + + // Round 2: Factor out common simple prefixes, + // just the first piece of each concatenation. + // This will be good enough a lot of the time. + // + // Complex subexpressions (e.g. involving quantifiers) + // are not safe to factor because that collapses their + // distinct paths through the automaton, which affects + // correctness in some cases. + start = 0 + out = sub[:0] + var first *Regexp + for i := 0; i <= len(sub); i++ { + // Invariant: the Regexps that were in sub[0:start] have been + // used or marked for reuse, and the slice space has been reused + // for out (len(out) <= start). + // + // Invariant: sub[start:i] consists of regexps that all begin with ifirst. + var ifirst *Regexp + if i < len(sub) { + ifirst = p.leadingRegexp(sub[i]) + if first != nil && first.Equal(ifirst) && + // first must be a character class OR a fixed repeat of a character class. + (isCharClass(first) || (first.Op == OpRepeat && first.Min == first.Max && isCharClass(first.Sub[0]))) { + continue + } + } + + // Found end of a run with common leading regexp: + // sub[start:i] all begin with first but sub[i] does not. + // + // Factor out common regexp and append factored expression to out. + if i == start { + // Nothing to do - run of length 0. + } else if i == start+1 { + // Just one: don't bother factoring. + out = append(out, sub[start]) + } else { + // Construct factored form: prefix(suffix1|suffix2|...) + prefix := first + for j := start; j < i; j++ { + reuse := j != start // prefix came from sub[start] + sub[j] = p.removeLeadingRegexp(sub[j], reuse) + p.checkLimits(sub[j]) + } + suffix := p.collapse(sub[start:i], OpAlternate) // recurse + + re := p.newRegexp(OpConcat) + re.Sub = append(re.Sub[:0], prefix, suffix) + out = append(out, re) + } + + // Prepare for next iteration. + start = i + first = ifirst + } + sub = out + + // Round 3: Collapse runs of single literals into character classes. + start = 0 + out = sub[:0] + for i := 0; i <= len(sub); i++ { + // Invariant: the Regexps that were in sub[0:start] have been + // used or marked for reuse, and the slice space has been reused + // for out (len(out) <= start). + // + // Invariant: sub[start:i] consists of regexps that are either + // literal runes or character classes. + if i < len(sub) && isCharClass(sub[i]) { + continue + } + + // sub[i] is not a char or char class; + // emit char class for sub[start:i]... + if i == start { + // Nothing to do - run of length 0. + } else if i == start+1 { + out = append(out, sub[start]) + } else { + // Make new char class. + // Start with most complex regexp in sub[start]. + max := start + for j := start + 1; j < i; j++ { + if sub[max].Op < sub[j].Op || sub[max].Op == sub[j].Op && len(sub[max].Rune) < len(sub[j].Rune) { + max = j + } + } + sub[start], sub[max] = sub[max], sub[start] + + for j := start + 1; j < i; j++ { + mergeCharClass(sub[start], sub[j]) + p.reuse(sub[j]) + } + cleanAlt(sub[start]) + out = append(out, sub[start]) + } + + // ... and then emit sub[i]. + if i < len(sub) { + out = append(out, sub[i]) + } + start = i + 1 + } + sub = out + + // Round 4: Collapse runs of empty matches into a single empty match. 
+ start = 0 + out = sub[:0] + for i := range sub { + if i+1 < len(sub) && sub[i].Op == OpEmptyMatch && sub[i+1].Op == OpEmptyMatch { + continue + } + out = append(out, sub[i]) + } + sub = out + + return sub +} + +// leadingString returns the leading literal string that re begins with. +// The string refers to storage in re or its children. +func (p *parser) leadingString(re *Regexp) ([]rune, Flags) { + if re.Op == OpConcat && len(re.Sub) > 0 { + re = re.Sub[0] + } + if re.Op != OpLiteral { + return nil, 0 + } + return re.Rune, re.Flags & FoldCase +} + +// removeLeadingString removes the first n leading runes +// from the beginning of re. It returns the replacement for re. +func (p *parser) removeLeadingString(re *Regexp, n int) *Regexp { + if re.Op == OpConcat && len(re.Sub) > 0 { + // Removing a leading string in a concatenation + // might simplify the concatenation. + sub := re.Sub[0] + sub = p.removeLeadingString(sub, n) + re.Sub[0] = sub + if sub.Op == OpEmptyMatch { + p.reuse(sub) + switch len(re.Sub) { + case 0, 1: + // Impossible but handle. + re.Op = OpEmptyMatch + re.Sub = nil + case 2: + old := re + re = re.Sub[1] + p.reuse(old) + default: + copy(re.Sub, re.Sub[1:]) + re.Sub = re.Sub[:len(re.Sub)-1] + } + } + return re + } + + if re.Op == OpLiteral { + re.Rune = re.Rune[:copy(re.Rune, re.Rune[n:])] + if len(re.Rune) == 0 { + re.Op = OpEmptyMatch + } + } + return re +} + +// leadingRegexp returns the leading regexp that re begins with. +// The regexp refers to storage in re or its children. +func (p *parser) leadingRegexp(re *Regexp) *Regexp { + if re.Op == OpEmptyMatch { + return nil + } + if re.Op == OpConcat && len(re.Sub) > 0 { + sub := re.Sub[0] + if sub.Op == OpEmptyMatch { + return nil + } + return sub + } + return re +} + +// removeLeadingRegexp removes the leading regexp in re. +// It returns the replacement for re. +// If reuse is true, it passes the removed regexp (if no longer needed) to p.reuse. +func (p *parser) removeLeadingRegexp(re *Regexp, reuse bool) *Regexp { + if re.Op == OpConcat && len(re.Sub) > 0 { + if reuse { + p.reuse(re.Sub[0]) + } + re.Sub = re.Sub[:copy(re.Sub, re.Sub[1:])] + switch len(re.Sub) { + case 0: + re.Op = OpEmptyMatch + re.Sub = nil + case 1: + old := re + re = re.Sub[0] + p.reuse(old) + } + return re + } + if reuse { + p.reuse(re) + } + return p.newRegexp(OpEmptyMatch) +} + +func literalRegexp(s string, flags Flags) *Regexp { + re := &Regexp{Op: OpLiteral} + re.Flags = flags + re.Rune = re.Rune0[:0] // use local storage for small strings + for _, c := range s { + if len(re.Rune) >= cap(re.Rune) { + // string is too long to fit in Rune0. let Go handle it + re.Rune = []rune(s) + break + } + re.Rune = append(re.Rune, c) + } + return re +} + +// Parsing. + +// Parse parses a regular expression string s, controlled by the specified +// Flags, and returns a regular expression parse tree. The syntax is +// described in the top-level comment. +func Parse(s string, flags Flags) (*Regexp, error) { + return parse(s, flags) +} + +func parse(s string, flags Flags) (_ *Regexp, err error) { + defer func() { + switch r := recover(); r { + default: + panic(r) + case nil: + // ok + case ErrLarge: // too big + err = &Error{Code: ErrLarge, Expr: s} + case ErrNestingDepth: + err = &Error{Code: ErrNestingDepth, Expr: s} + } + }() + + if flags&Literal != 0 { + // Trivial parser for literal string. + if err := checkUTF8(s); err != nil { + return nil, err + } + return literalRegexp(s, flags), nil + } + + // Otherwise, must do real work. 
+ var ( + p parser + c rune + op Op + lastRepeat string + ) + p.flags = flags + p.wholeRegexp = s + t := s + for t != "" { + repeat := "" + BigSwitch: + switch t[0] { + default: + if c, t, err = nextRune(t); err != nil { + return nil, err + } + p.literal(c) + + case '(': + if p.flags&PerlX != 0 && len(t) >= 2 && t[1] == '?' { + // Flag changes and non-capturing groups. + if t, err = p.parsePerlFlags(t); err != nil { + return nil, err + } + break + } + p.numCap++ + p.op(opLeftParen).Cap = p.numCap + t = t[1:] + case '|': + if err = p.parseVerticalBar(); err != nil { + return nil, err + } + t = t[1:] + case ')': + if err = p.parseRightParen(); err != nil { + return nil, err + } + t = t[1:] + case '^': + if p.flags&OneLine != 0 { + p.op(OpBeginText) + } else { + p.op(OpBeginLine) + } + t = t[1:] + case '$': + if p.flags&OneLine != 0 { + p.op(OpEndText).Flags |= WasDollar + } else { + p.op(OpEndLine) + } + t = t[1:] + case '.': + if p.flags&DotNL != 0 { + p.op(OpAnyChar) + } else { + p.op(OpAnyCharNotNL) + } + t = t[1:] + case '[': + if t, err = p.parseClass(t); err != nil { + return nil, err + } + case '*', '+', '?': + before := t + switch t[0] { + case '*': + op = OpStar + case '+': + op = OpPlus + case '?': + op = OpQuest + } + after := t[1:] + if after, err = p.repeat(op, 0, 0, before, after, lastRepeat); err != nil { + return nil, err + } + repeat = before + t = after + case '{': + op = OpRepeat + before := t + min, max, after, ok := p.parseRepeat(t) + if !ok { + // If the repeat cannot be parsed, { is a literal. + p.literal('{') + t = t[1:] + break + } + if min < 0 || min > 1000 || max > 1000 || max >= 0 && min > max { + // Numbers were too big, or max is present and min > max. + return nil, &Error{ErrInvalidRepeatSize, before[:len(before)-len(after)]} + } + if after, err = p.repeat(op, min, max, before, after, lastRepeat); err != nil { + return nil, err + } + repeat = before + t = after + case '\\': + if p.flags&PerlX != 0 && len(t) >= 2 { + switch t[1] { + case 'A': + p.op(OpBeginText) + t = t[2:] + break BigSwitch + case 'b': + p.op(OpWordBoundary) + t = t[2:] + break BigSwitch + case 'B': + p.op(OpNoWordBoundary) + t = t[2:] + break BigSwitch + case 'C': + // any byte; not supported + return nil, &Error{ErrInvalidEscape, t[:2]} + case 'Q': + // \Q ... \E: the ... is always literals + var lit string + lit, t, _ = strings.Cut(t[2:], `\E`) + for lit != "" { + c, rest, err := nextRune(lit) + if err != nil { + return nil, err + } + p.literal(c) + lit = rest + } + break BigSwitch + case 'z': + p.op(OpEndText) + t = t[2:] + break BigSwitch + } + } + + re := p.newRegexp(OpCharClass) + re.Flags = p.flags + + // Look for Unicode character group like \p{Han} + if len(t) >= 2 && (t[1] == 'p' || t[1] == 'P') { + r, rest, err := p.parseUnicodeClass(t, re.Rune0[:0]) + if err != nil { + return nil, err + } + if r != nil { + re.Rune = r + t = rest + p.push(re) + break BigSwitch + } + } + + // Perl character class escape. + if r, rest := p.parsePerlClassEscape(t, re.Rune0[:0]); r != nil { + re.Rune = r + t = rest + p.push(re) + break BigSwitch + } + p.reuse(re) + + // Ordinary single-character escape. 
+			if c, t, err = p.parseEscape(t); err != nil {
+				return nil, err
+			}
+			p.literal(c)
+		}
+		lastRepeat = repeat
+	}
+
+	p.concat()
+	if p.swapVerticalBar() {
+		// pop vertical bar
+		p.stack = p.stack[:len(p.stack)-1]
+	}
+	p.alternate()
+
+	n := len(p.stack)
+	if n != 1 {
+		return nil, &Error{ErrMissingParen, s}
+	}
+	return p.stack[0], nil
+}
+
+// parseRepeat parses {min} (max=min) or {min,} (max=-1) or {min,max}.
+// If s is not of that form, it returns ok == false.
+// If s has the right form but the values are too big, it returns min == -1, ok == true.
+func (p *parser) parseRepeat(s string) (min, max int, rest string, ok bool) {
+	if s == "" || s[0] != '{' {
+		return
+	}
+	s = s[1:]
+	var ok1 bool
+	if min, s, ok1 = p.parseInt(s); !ok1 {
+		return
+	}
+	if s == "" {
+		return
+	}
+	if s[0] != ',' {
+		max = min
+	} else {
+		s = s[1:]
+		if s == "" {
+			return
+		}
+		if s[0] == '}' {
+			max = -1
+		} else if max, s, ok1 = p.parseInt(s); !ok1 {
+			return
+		} else if max < 0 {
+			// parseInt found too big a number
+			min = -1
+		}
+	}
+	if s == "" || s[0] != '}' {
+		return
+	}
+	rest = s[1:]
+	ok = true
+	return
+}
+
+// parsePerlFlags parses a Perl flag setting or non-capturing group or both,
+// like (?i) or (?: or (?i:. It removes the prefix from s and updates the parse state.
+// The caller must have ensured that s begins with "(?".
+func (p *parser) parsePerlFlags(s string) (rest string, err error) {
+	t := s
+
+	// Check for named captures, first introduced in Python's regexp library.
+	// As usual, there are three slightly different syntaxes:
+	//
+	//	(?P<name>expr)   the original, introduced by Python
+	//	(?<name>expr)    the .NET alteration, adopted by Perl 5.10
+	//	(?'name'expr)    another .NET alteration, adopted by Perl 5.10
+	//
+	// Perl 5.10 gave in and implemented the Python version too,
+	// but they claim that the last two are the preferred forms.
+	// PCRE and languages based on it (specifically, PHP and Ruby)
+	// support all three as well. EcmaScript 4 uses only the Python form.
+	//
+	// In both the open source world (via Code Search) and the
+	// Google source tree, (?P<name>expr) and (?<name>expr) are the
+	// dominant forms of named captures and both are supported.
+	startsWithP := len(t) > 4 && t[2] == 'P' && t[3] == '<'
+	startsWithName := len(t) > 3 && t[2] == '<'
+
+	if startsWithP || startsWithName {
+		// position of expr start
+		exprStartPos := 4
+		if startsWithName {
+			exprStartPos = 3
+		}
+
+		// Pull out name.
+		end := strings.IndexRune(t, '>')
+		if end < 0 {
+			if err = checkUTF8(t); err != nil {
+				return "", err
+			}
+			return "", &Error{ErrInvalidNamedCapture, s}
+		}
+
+		capture := t[:end+1]        // "(?P<name>" or "(?<name>"
+		name := t[exprStartPos:end] // "name"
+		if err = checkUTF8(name); err != nil {
+			return "", err
+		}
+		if !isValidCaptureName(name) {
+			return "", &Error{ErrInvalidNamedCapture, capture}
+		}
+
+		// Like ordinary capture, but named.
+		p.numCap++
+		re := p.op(opLeftParen)
+		re.Cap = p.numCap
+		re.Name = name
+		return t[end+1:], nil
+	}
+
+	// Non-capturing group. Might also twiddle Perl flags.
+	var c rune
+	t = t[2:] // skip (?
+	flags := p.flags
+	sign := +1
+	sawFlag := false
+Loop:
+	for t != "" {
+		if c, t, err = nextRune(t); err != nil {
+			return "", err
+		}
+		switch c {
+		default:
+			break Loop
+
+		// Flags.
+		case 'i':
+			flags |= FoldCase
+			sawFlag = true
+		case 'm':
+			flags &^= OneLine
+			sawFlag = true
+		case 's':
+			flags |= DotNL
+			sawFlag = true
+		case 'U':
+			flags |= NonGreedy
+			sawFlag = true
+
+		// Switch to negation.
+ case '-': + if sign < 0 { + break Loop + } + sign = -1 + // Invert flags so that | above turn into &^ and vice versa. + // We'll invert flags again before using it below. + flags = ^flags + sawFlag = false + + // End of flags, starting group or not. + case ':', ')': + if sign < 0 { + if !sawFlag { + break Loop + } + flags = ^flags + } + if c == ':' { + // Open new group + p.op(opLeftParen) + } + p.flags = flags + return t, nil + } + } + + return "", &Error{ErrInvalidPerlOp, s[:len(s)-len(t)]} +} + +// isValidCaptureName reports whether name +// is a valid capture name: [A-Za-z0-9_]+. +// PCRE limits names to 32 bytes. +// Python rejects names starting with digits. +// We don't enforce either of those. +func isValidCaptureName(name string) bool { + if name == "" { + return false + } + for _, c := range name { + if c != '_' && !isalnum(c) { + return false + } + } + return true +} + +// parseInt parses a decimal integer. +func (p *parser) parseInt(s string) (n int, rest string, ok bool) { + if s == "" || s[0] < '0' || '9' < s[0] { + return + } + // Disallow leading zeros. + if len(s) >= 2 && s[0] == '0' && '0' <= s[1] && s[1] <= '9' { + return + } + t := s + for s != "" && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + rest = s + ok = true + // Have digits, compute value. + t = t[:len(t)-len(s)] + for i := 0; i < len(t); i++ { + // Avoid overflow. + if n >= 1e8 { + n = -1 + break + } + n = n*10 + int(t[i]) - '0' + } + return +} + +// can this be represented as a character class? +// single-rune literal string, char class, ., and .|\n. +func isCharClass(re *Regexp) bool { + return re.Op == OpLiteral && len(re.Rune) == 1 || + re.Op == OpCharClass || + re.Op == OpAnyCharNotNL || + re.Op == OpAnyChar +} + +// does re match r? +func matchRune(re *Regexp, r rune) bool { + switch re.Op { + case OpLiteral: + return len(re.Rune) == 1 && re.Rune[0] == r + case OpCharClass: + for i := 0; i < len(re.Rune); i += 2 { + if re.Rune[i] <= r && r <= re.Rune[i+1] { + return true + } + } + return false + case OpAnyCharNotNL: + return r != '\n' + case OpAnyChar: + return true + } + return false +} + +// parseVerticalBar handles a | in the input. +func (p *parser) parseVerticalBar() error { + p.concat() + + // The concatenation we just parsed is on top of the stack. + // If it sits above an opVerticalBar, swap it below + // (things below an opVerticalBar become an alternation). + // Otherwise, push a new vertical bar. + if !p.swapVerticalBar() { + p.op(opVerticalBar) + } + + return nil +} + +// mergeCharClass makes dst = dst|src. +// The caller must ensure that dst.Op >= src.Op, +// to reduce the amount of copying. +func mergeCharClass(dst, src *Regexp) { + switch dst.Op { + case OpAnyChar: + // src doesn't add anything. + case OpAnyCharNotNL: + // src might add \n + if matchRune(src, '\n') { + dst.Op = OpAnyChar + } + case OpCharClass: + // src is simpler, so either literal or char class + if src.Op == OpLiteral { + dst.Rune = appendLiteral(dst.Rune, src.Rune[0], src.Flags) + } else { + dst.Rune = appendClass(dst.Rune, src.Rune) + } + case OpLiteral: + // both literal + if src.Rune[0] == dst.Rune[0] && src.Flags == dst.Flags { + break + } + dst.Op = OpCharClass + dst.Rune = appendLiteral(dst.Rune[:0], dst.Rune[0], dst.Flags) + dst.Rune = appendLiteral(dst.Rune, src.Rune[0], src.Flags) + } +} + +// If the top of the stack is an element followed by an opVerticalBar +// swapVerticalBar swaps the two and returns true. +// Otherwise it returns false. 
+func (p *parser) swapVerticalBar() bool { + // If above and below vertical bar are literal or char class, + // can merge into a single char class. + n := len(p.stack) + if n >= 3 && p.stack[n-2].Op == opVerticalBar && isCharClass(p.stack[n-1]) && isCharClass(p.stack[n-3]) { + re1 := p.stack[n-1] + re3 := p.stack[n-3] + // Make re3 the more complex of the two. + if re1.Op > re3.Op { + re1, re3 = re3, re1 + p.stack[n-3] = re3 + } + mergeCharClass(re3, re1) + p.reuse(re1) + p.stack = p.stack[:n-1] + return true + } + + if n >= 2 { + re1 := p.stack[n-1] + re2 := p.stack[n-2] + if re2.Op == opVerticalBar { + if n >= 3 { + // Now out of reach. + // Clean opportunistically. + cleanAlt(p.stack[n-3]) + } + p.stack[n-2] = re1 + p.stack[n-1] = re2 + return true + } + } + return false +} + +// parseRightParen handles a ) in the input. +func (p *parser) parseRightParen() error { + p.concat() + if p.swapVerticalBar() { + // pop vertical bar + p.stack = p.stack[:len(p.stack)-1] + } + p.alternate() + + n := len(p.stack) + if n < 2 { + return &Error{ErrUnexpectedParen, p.wholeRegexp} + } + re1 := p.stack[n-1] + re2 := p.stack[n-2] + p.stack = p.stack[:n-2] + if re2.Op != opLeftParen { + return &Error{ErrUnexpectedParen, p.wholeRegexp} + } + // Restore flags at time of paren. + p.flags = re2.Flags + if re2.Cap == 0 { + // Just for grouping. + p.push(re1) + } else { + re2.Op = OpCapture + re2.Sub = re2.Sub0[:1] + re2.Sub[0] = re1 + p.push(re2) + } + return nil +} + +// parseEscape parses an escape sequence at the beginning of s +// and returns the rune. +func (p *parser) parseEscape(s string) (r rune, rest string, err error) { + t := s[1:] + if t == "" { + return 0, "", &Error{ErrTrailingBackslash, ""} + } + c, t, err := nextRune(t) + if err != nil { + return 0, "", err + } + +Switch: + switch c { + default: + if c < utf8.RuneSelf && !isalnum(c) { + // Escaped non-word characters are always themselves. + // PCRE is not quite so rigorous: it accepts things like + // \q, but we don't. We once rejected \_, but too many + // programs and people insist on using it, so allow \_. + return c, t, nil + } + + // Octal escapes. + case '1', '2', '3', '4', '5', '6', '7': + // Single non-zero digit is a backreference; not supported + if t == "" || t[0] < '0' || t[0] > '7' { + break + } + fallthrough + case '0': + // Consume up to three octal digits; already have one. + r = c - '0' + for i := 1; i < 3; i++ { + if t == "" || t[0] < '0' || t[0] > '7' { + break + } + r = r*8 + rune(t[0]) - '0' + t = t[1:] + } + return r, t, nil + + // Hexadecimal escapes. + case 'x': + if t == "" { + break + } + if c, t, err = nextRune(t); err != nil { + return 0, "", err + } + if c == '{' { + // Any number of digits in braces. + // Perl accepts any text at all; it ignores all text + // after the first non-hex digit. We require only hex digits, + // and at least one. + nhex := 0 + r = 0 + for { + if t == "" { + break Switch + } + if c, t, err = nextRune(t); err != nil { + return 0, "", err + } + if c == '}' { + break + } + v := unhex(c) + if v < 0 { + break Switch + } + r = r*16 + v + if r > unicode.MaxRune { + break Switch + } + nhex++ + } + if nhex == 0 { + break Switch + } + return r, t, nil + } + + // Easy case: two hex digits. + x := unhex(c) + if c, t, err = nextRune(t); err != nil { + return 0, "", err + } + y := unhex(c) + if x < 0 || y < 0 { + break + } + return x*16 + y, t, nil + + // C escapes. There is no case 'b', to avoid misparsing + // the Perl word-boundary \b as the C backspace \b + // when in POSIX mode. 
In Perl, /\b/ means word-boundary + // but /[\b]/ means backspace. We don't support that. + // If you want a backspace, embed a literal backspace + // character or use \x08. + case 'a': + return '\a', t, err + case 'f': + return '\f', t, err + case 'n': + return '\n', t, err + case 'r': + return '\r', t, err + case 't': + return '\t', t, err + case 'v': + return '\v', t, err + } + return 0, "", &Error{ErrInvalidEscape, s[:len(s)-len(t)]} +} + +// parseClassChar parses a character class character at the beginning of s +// and returns it. +func (p *parser) parseClassChar(s, wholeClass string) (r rune, rest string, err error) { + if s == "" { + return 0, "", &Error{Code: ErrMissingBracket, Expr: wholeClass} + } + + // Allow regular escape sequences even though + // many need not be escaped in this context. + if s[0] == '\\' { + return p.parseEscape(s) + } + + return nextRune(s) +} + +type charGroup struct { + sign int + class []rune +} + +// parsePerlClassEscape parses a leading Perl character class escape like \d +// from the beginning of s. If one is present, it appends the characters to r +// and returns the new slice r and the remainder of the string. +func (p *parser) parsePerlClassEscape(s string, r []rune) (out []rune, rest string) { + if p.flags&PerlX == 0 || len(s) < 2 || s[0] != '\\' { + return + } + g := perlGroup[s[0:2]] + if g.sign == 0 { + return + } + return p.appendGroup(r, g), s[2:] +} + +// parseNamedClass parses a leading POSIX named character class like [:alnum:] +// from the beginning of s. If one is present, it appends the characters to r +// and returns the new slice r and the remainder of the string. +func (p *parser) parseNamedClass(s string, r []rune) (out []rune, rest string, err error) { + if len(s) < 2 || s[0] != '[' || s[1] != ':' { + return + } + + i := strings.Index(s[2:], ":]") + if i < 0 { + return + } + i += 2 + name, s := s[0:i+2], s[i+2:] + g := posixGroup[name] + if g.sign == 0 { + return nil, "", &Error{ErrInvalidCharRange, name} + } + return p.appendGroup(r, g), s, nil +} + +func (p *parser) appendGroup(r []rune, g charGroup) []rune { + if p.flags&FoldCase == 0 { + if g.sign < 0 { + r = appendNegatedClass(r, g.class) + } else { + r = appendClass(r, g.class) + } + } else { + tmp := p.tmpClass[:0] + tmp = appendFoldedClass(tmp, g.class) + p.tmpClass = tmp + tmp = cleanClass(&p.tmpClass) + if g.sign < 0 { + r = appendNegatedClass(r, tmp) + } else { + r = appendClass(r, tmp) + } + } + return r +} + +var anyTable = &unicode.RangeTable{ + R16: []unicode.Range16{{Lo: 0, Hi: 1<<16 - 1, Stride: 1}}, + R32: []unicode.Range32{{Lo: 1 << 16, Hi: unicode.MaxRune, Stride: 1}}, +} + +// unicodeTable returns the unicode.RangeTable identified by name +// and the table of additional fold-equivalent code points. +func unicodeTable(name string) (*unicode.RangeTable, *unicode.RangeTable) { + // Special case: "Any" means any. + if name == "Any" { + return anyTable, anyTable + } + if t := unicode.Categories[name]; t != nil { + return t, unicode.FoldCategory[name] + } + if t := unicode.Scripts[name]; t != nil { + return t, unicode.FoldScript[name] + } + return nil, nil +} + +// parseUnicodeClass parses a leading Unicode character class like \p{Han} +// from the beginning of s. If one is present, it appends the characters to r +// and returns the new slice r and the remainder of the string. 
+func (p *parser) parseUnicodeClass(s string, r []rune) (out []rune, rest string, err error) { + if p.flags&UnicodeGroups == 0 || len(s) < 2 || s[0] != '\\' || s[1] != 'p' && s[1] != 'P' { + return + } + + // Committed to parse or return error. + sign := +1 + if s[1] == 'P' { + sign = -1 + } + t := s[2:] + c, t, err := nextRune(t) + if err != nil { + return + } + var seq, name string + if c != '{' { + // Single-letter name. + seq = s[:len(s)-len(t)] + name = seq[2:] + } else { + // Name is in braces. + end := strings.IndexRune(s, '}') + if end < 0 { + if err = checkUTF8(s); err != nil { + return + } + return nil, "", &Error{ErrInvalidCharRange, s} + } + seq, t = s[:end+1], s[end+1:] + name = s[3:end] + if err = checkUTF8(name); err != nil { + return + } + } + + // Group can have leading negation too. \p{^Han} == \P{Han}, \P{^Han} == \p{Han}. + if name != "" && name[0] == '^' { + sign = -sign + name = name[1:] + } + + tab, fold := unicodeTable(name) + if tab == nil { + return nil, "", &Error{ErrInvalidCharRange, seq} + } + + if p.flags&FoldCase == 0 || fold == nil { + if sign > 0 { + r = appendTable(r, tab) + } else { + r = appendNegatedTable(r, tab) + } + } else { + // Merge and clean tab and fold in a temporary buffer. + // This is necessary for the negative case and just tidy + // for the positive case. + tmp := p.tmpClass[:0] + tmp = appendTable(tmp, tab) + tmp = appendTable(tmp, fold) + p.tmpClass = tmp + tmp = cleanClass(&p.tmpClass) + if sign > 0 { + r = appendClass(r, tmp) + } else { + r = appendNegatedClass(r, tmp) + } + } + return r, t, nil +} + +// parseClass parses a character class at the beginning of s +// and pushes it onto the parse stack. +func (p *parser) parseClass(s string) (rest string, err error) { + t := s[1:] // chop [ + re := p.newRegexp(OpCharClass) + re.Flags = p.flags + re.Rune = re.Rune0[:0] + + sign := +1 + if t != "" && t[0] == '^' { + sign = -1 + t = t[1:] + + // If character class does not match \n, add it here, + // so that negation later will do the right thing. + if p.flags&ClassNL == 0 { + re.Rune = append(re.Rune, '\n', '\n') + } + } + + class := re.Rune + first := true // ] and - are okay as first char in class + for t == "" || t[0] != ']' || first { + // POSIX: - is only okay unescaped as first or last in class. + // Perl: - is okay anywhere. + if t != "" && t[0] == '-' && p.flags&PerlX == 0 && !first && (len(t) == 1 || t[1] != ']') { + _, size := utf8.DecodeRuneInString(t[1:]) + return "", &Error{Code: ErrInvalidCharRange, Expr: t[:1+size]} + } + first = false + + // Look for POSIX [:alnum:] etc. + if len(t) > 2 && t[0] == '[' && t[1] == ':' { + nclass, nt, err := p.parseNamedClass(t, class) + if err != nil { + return "", err + } + if nclass != nil { + class, t = nclass, nt + continue + } + } + + // Look for Unicode character group like \p{Han}. + nclass, nt, err := p.parseUnicodeClass(t, class) + if err != nil { + return "", err + } + if nclass != nil { + class, t = nclass, nt + continue + } + + // Look for Perl character class symbols (extension). + if nclass, nt := p.parsePerlClassEscape(t, class); nclass != nil { + class, t = nclass, nt + continue + } + + // Single character or simple range. + rng := t + var lo, hi rune + if lo, t, err = p.parseClassChar(t, s); err != nil { + return "", err + } + hi = lo + // [a-] means (a|-) so check for final ]. 
+ if len(t) >= 2 && t[0] == '-' && t[1] != ']' { + t = t[1:] + if hi, t, err = p.parseClassChar(t, s); err != nil { + return "", err + } + if hi < lo { + rng = rng[:len(rng)-len(t)] + return "", &Error{Code: ErrInvalidCharRange, Expr: rng} + } + } + if p.flags&FoldCase == 0 { + class = appendRange(class, lo, hi) + } else { + class = appendFoldedRange(class, lo, hi) + } + } + t = t[1:] // chop ] + + // Use &re.Rune instead of &class to avoid allocation. + re.Rune = class + class = cleanClass(&re.Rune) + if sign < 0 { + class = negateClass(class) + } + re.Rune = class + p.push(re) + return t, nil +} + +// cleanClass sorts the ranges (pairs of elements of r), +// merges them, and eliminates duplicates. +func cleanClass(rp *[]rune) []rune { + + // Sort by lo increasing, hi decreasing to break ties. + sort.Sort(ranges{rp}) + + r := *rp + if len(r) < 2 { + return r + } + + // Merge abutting, overlapping. + w := 2 // write index + for i := 2; i < len(r); i += 2 { + lo, hi := r[i], r[i+1] + if lo <= r[w-1]+1 { + // merge with previous range + if hi > r[w-1] { + r[w-1] = hi + } + continue + } + // new disjoint range + r[w] = lo + r[w+1] = hi + w += 2 + } + + return r[:w] +} + +// inCharClass reports whether r is in the class. +// It assumes the class has been cleaned by cleanClass. +func inCharClass(r rune, class []rune) bool { + _, ok := sort.Find(len(class)/2, func(i int) int { + lo, hi := class[2*i], class[2*i+1] + if r > hi { + return +1 + } + if r < lo { + return -1 + } + return 0 + }) + return ok +} + +// appendLiteral returns the result of appending the literal x to the class r. +func appendLiteral(r []rune, x rune, flags Flags) []rune { + if flags&FoldCase != 0 { + return appendFoldedRange(r, x, x) + } + return appendRange(r, x, x) +} + +// appendRange returns the result of appending the range lo-hi to the class r. +func appendRange(r []rune, lo, hi rune) []rune { + // Expand last range or next to last range if it overlaps or abuts. + // Checking two ranges helps when appending case-folded + // alphabets, so that one range can be expanding A-Z and the + // other expanding a-z. + n := len(r) + for i := 2; i <= 4; i += 2 { // twice, using i=2, i=4 + if n >= i { + rlo, rhi := r[n-i], r[n-i+1] + if lo <= rhi+1 && rlo <= hi+1 { + if lo < rlo { + r[n-i] = lo + } + if hi > rhi { + r[n-i+1] = hi + } + return r + } + } + } + + return append(r, lo, hi) +} + +const ( + // minimum and maximum runes involved in folding. + // checked during test. + minFold = 0x0041 + maxFold = 0x1e943 +) + +// appendFoldedRange returns the result of appending the range lo-hi +// and its case folding-equivalent runes to the class r. +func appendFoldedRange(r []rune, lo, hi rune) []rune { + // Optimizations. + if lo <= minFold && hi >= maxFold { + // Range is full: folding can't add more. + return appendRange(r, lo, hi) + } + if hi < minFold || lo > maxFold { + // Range is outside folding possibilities. + return appendRange(r, lo, hi) + } + if lo < minFold { + // [lo, minFold-1] needs no folding. + r = appendRange(r, lo, minFold-1) + lo = minFold + } + if hi > maxFold { + // [maxFold+1, hi] needs no folding. + r = appendRange(r, maxFold+1, hi) + hi = maxFold + } + + // Brute force. Depend on appendRange to coalesce ranges on the fly. + for c := lo; c <= hi; c++ { + r = appendRange(r, c, c) + f := unicode.SimpleFold(c) + for f != c { + r = appendRange(r, f, f) + f = unicode.SimpleFold(f) + } + } + return r +} + +// appendClass returns the result of appending the class x to the class r. +// It assume x is clean. 
+func appendClass(r []rune, x []rune) []rune { + for i := 0; i < len(x); i += 2 { + r = appendRange(r, x[i], x[i+1]) + } + return r +} + +// appendFoldedClass returns the result of appending the case folding of the class x to the class r. +func appendFoldedClass(r []rune, x []rune) []rune { + for i := 0; i < len(x); i += 2 { + r = appendFoldedRange(r, x[i], x[i+1]) + } + return r +} + +// appendNegatedClass returns the result of appending the negation of the class x to the class r. +// It assumes x is clean. +func appendNegatedClass(r []rune, x []rune) []rune { + nextLo := '\u0000' + for i := 0; i < len(x); i += 2 { + lo, hi := x[i], x[i+1] + if nextLo <= lo-1 { + r = appendRange(r, nextLo, lo-1) + } + nextLo = hi + 1 + } + if nextLo <= unicode.MaxRune { + r = appendRange(r, nextLo, unicode.MaxRune) + } + return r +} + +// appendTable returns the result of appending x to the class r. +func appendTable(r []rune, x *unicode.RangeTable) []rune { + for _, xr := range x.R16 { + lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride) + if stride == 1 { + r = appendRange(r, lo, hi) + continue + } + for c := lo; c <= hi; c += stride { + r = appendRange(r, c, c) + } + } + for _, xr := range x.R32 { + lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride) + if stride == 1 { + r = appendRange(r, lo, hi) + continue + } + for c := lo; c <= hi; c += stride { + r = appendRange(r, c, c) + } + } + return r +} + +// appendNegatedTable returns the result of appending the negation of x to the class r. +func appendNegatedTable(r []rune, x *unicode.RangeTable) []rune { + nextLo := '\u0000' // lo end of next class to add + for _, xr := range x.R16 { + lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride) + if stride == 1 { + if nextLo <= lo-1 { + r = appendRange(r, nextLo, lo-1) + } + nextLo = hi + 1 + continue + } + for c := lo; c <= hi; c += stride { + if nextLo <= c-1 { + r = appendRange(r, nextLo, c-1) + } + nextLo = c + 1 + } + } + for _, xr := range x.R32 { + lo, hi, stride := rune(xr.Lo), rune(xr.Hi), rune(xr.Stride) + if stride == 1 { + if nextLo <= lo-1 { + r = appendRange(r, nextLo, lo-1) + } + nextLo = hi + 1 + continue + } + for c := lo; c <= hi; c += stride { + if nextLo <= c-1 { + r = appendRange(r, nextLo, c-1) + } + nextLo = c + 1 + } + } + if nextLo <= unicode.MaxRune { + r = appendRange(r, nextLo, unicode.MaxRune) + } + return r +} + +// negateClass overwrites r and returns r's negation. +// It assumes the class r is already clean. +func negateClass(r []rune) []rune { + nextLo := '\u0000' // lo end of next class to add + w := 0 // write index + for i := 0; i < len(r); i += 2 { + lo, hi := r[i], r[i+1] + if nextLo <= lo-1 { + r[w] = nextLo + r[w+1] = lo - 1 + w += 2 + } + nextLo = hi + 1 + } + r = r[:w] + if nextLo <= unicode.MaxRune { + // It's possible for the negation to have one more + // range - this one - than the original class, so use append. + r = append(r, nextLo, unicode.MaxRune) + } + return r +} + +// ranges implements sort.Interface on a []rune. +// The choice of receiver type definition is strange +// but avoids an allocation since we already have +// a *[]rune. 
+type ranges struct { + p *[]rune +} + +func (ra ranges) Less(i, j int) bool { + p := *ra.p + i *= 2 + j *= 2 + return p[i] < p[j] || p[i] == p[j] && p[i+1] > p[j+1] +} + +func (ra ranges) Len() int { + return len(*ra.p) / 2 +} + +func (ra ranges) Swap(i, j int) { + p := *ra.p + i *= 2 + j *= 2 + p[i], p[i+1], p[j], p[j+1] = p[j], p[j+1], p[i], p[i+1] +} + +func checkUTF8(s string) error { + for s != "" { + rune, size := utf8.DecodeRuneInString(s) + if rune == utf8.RuneError && size == 1 { + return &Error{Code: ErrInvalidUTF8, Expr: s} + } + s = s[size:] + } + return nil +} + +func nextRune(s string) (c rune, t string, err error) { + c, size := utf8.DecodeRuneInString(s) + if c == utf8.RuneError && size == 1 { + return 0, "", &Error{Code: ErrInvalidUTF8, Expr: s} + } + return c, s[size:], nil +} + +func isalnum(c rune) bool { + return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' +} + +func unhex(c rune) rune { + if '0' <= c && c <= '9' { + return c - '0' + } + if 'a' <= c && c <= 'f' { + return c - 'a' + 10 + } + if 'A' <= c && c <= 'F' { + return c - 'A' + 10 + } + return -1 +} diff --git a/vendor/github.com/grafana/regexp/syntax/perl_groups.go b/vendor/github.com/grafana/regexp/syntax/perl_groups.go new file mode 100644 index 00000000..effe4e68 --- /dev/null +++ b/vendor/github.com/grafana/regexp/syntax/perl_groups.go @@ -0,0 +1,134 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// GENERATED BY make_perl_groups.pl; DO NOT EDIT. +// make_perl_groups.pl >perl_groups.go + +package syntax + +var code1 = []rune{ /* \d */ + 0x30, 0x39, +} + +var code2 = []rune{ /* \s */ + 0x9, 0xa, + 0xc, 0xd, + 0x20, 0x20, +} + +var code3 = []rune{ /* \w */ + 0x30, 0x39, + 0x41, 0x5a, + 0x5f, 0x5f, + 0x61, 0x7a, +} + +var perlGroup = map[string]charGroup{ + `\d`: {+1, code1}, + `\D`: {-1, code1}, + `\s`: {+1, code2}, + `\S`: {-1, code2}, + `\w`: {+1, code3}, + `\W`: {-1, code3}, +} +var code4 = []rune{ /* [:alnum:] */ + 0x30, 0x39, + 0x41, 0x5a, + 0x61, 0x7a, +} + +var code5 = []rune{ /* [:alpha:] */ + 0x41, 0x5a, + 0x61, 0x7a, +} + +var code6 = []rune{ /* [:ascii:] */ + 0x0, 0x7f, +} + +var code7 = []rune{ /* [:blank:] */ + 0x9, 0x9, + 0x20, 0x20, +} + +var code8 = []rune{ /* [:cntrl:] */ + 0x0, 0x1f, + 0x7f, 0x7f, +} + +var code9 = []rune{ /* [:digit:] */ + 0x30, 0x39, +} + +var code10 = []rune{ /* [:graph:] */ + 0x21, 0x7e, +} + +var code11 = []rune{ /* [:lower:] */ + 0x61, 0x7a, +} + +var code12 = []rune{ /* [:print:] */ + 0x20, 0x7e, +} + +var code13 = []rune{ /* [:punct:] */ + 0x21, 0x2f, + 0x3a, 0x40, + 0x5b, 0x60, + 0x7b, 0x7e, +} + +var code14 = []rune{ /* [:space:] */ + 0x9, 0xd, + 0x20, 0x20, +} + +var code15 = []rune{ /* [:upper:] */ + 0x41, 0x5a, +} + +var code16 = []rune{ /* [:word:] */ + 0x30, 0x39, + 0x41, 0x5a, + 0x5f, 0x5f, + 0x61, 0x7a, +} + +var code17 = []rune{ /* [:xdigit:] */ + 0x30, 0x39, + 0x41, 0x46, + 0x61, 0x66, +} + +var posixGroup = map[string]charGroup{ + `[:alnum:]`: {+1, code4}, + `[:^alnum:]`: {-1, code4}, + `[:alpha:]`: {+1, code5}, + `[:^alpha:]`: {-1, code5}, + `[:ascii:]`: {+1, code6}, + `[:^ascii:]`: {-1, code6}, + `[:blank:]`: {+1, code7}, + `[:^blank:]`: {-1, code7}, + `[:cntrl:]`: {+1, code8}, + `[:^cntrl:]`: {-1, code8}, + `[:digit:]`: {+1, code9}, + `[:^digit:]`: {-1, code9}, + `[:graph:]`: {+1, code10}, + `[:^graph:]`: {-1, code10}, + `[:lower:]`: {+1, code11}, + `[:^lower:]`: {-1, code11}, + `[:print:]`: {+1, code12}, + 
`[:^print:]`: {-1, code12}, + `[:punct:]`: {+1, code13}, + `[:^punct:]`: {-1, code13}, + `[:space:]`: {+1, code14}, + `[:^space:]`: {-1, code14}, + `[:upper:]`: {+1, code15}, + `[:^upper:]`: {-1, code15}, + `[:word:]`: {+1, code16}, + `[:^word:]`: {-1, code16}, + `[:xdigit:]`: {+1, code17}, + `[:^xdigit:]`: {-1, code17}, +} diff --git a/vendor/github.com/grafana/regexp/syntax/prog.go b/vendor/github.com/grafana/regexp/syntax/prog.go new file mode 100644 index 00000000..6a3705ec --- /dev/null +++ b/vendor/github.com/grafana/regexp/syntax/prog.go @@ -0,0 +1,349 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +import ( + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// Compiled program. +// May not belong in this package, but convenient for now. + +// A Prog is a compiled regular expression program. +type Prog struct { + Inst []Inst + Start int // index of start instruction + NumCap int // number of InstCapture insts in re +} + +// An InstOp is an instruction opcode. +type InstOp uint8 + +const ( + InstAlt InstOp = iota + InstAltMatch + InstCapture + InstEmptyWidth + InstMatch + InstFail + InstNop + InstRune + InstRune1 + InstRuneAny + InstRuneAnyNotNL +) + +var instOpNames = []string{ + "InstAlt", + "InstAltMatch", + "InstCapture", + "InstEmptyWidth", + "InstMatch", + "InstFail", + "InstNop", + "InstRune", + "InstRune1", + "InstRuneAny", + "InstRuneAnyNotNL", +} + +func (i InstOp) String() string { + if uint(i) >= uint(len(instOpNames)) { + return "" + } + return instOpNames[i] +} + +// An EmptyOp specifies a kind or mixture of zero-width assertions. +type EmptyOp uint8 + +const ( + EmptyBeginLine EmptyOp = 1 << iota + EmptyEndLine + EmptyBeginText + EmptyEndText + EmptyWordBoundary + EmptyNoWordBoundary +) + +// EmptyOpContext returns the zero-width assertions +// satisfied at the position between the runes r1 and r2. +// Passing r1 == -1 indicates that the position is +// at the beginning of the text. +// Passing r2 == -1 indicates that the position is +// at the end of the text. +func EmptyOpContext(r1, r2 rune) EmptyOp { + var op EmptyOp = EmptyNoWordBoundary + var boundary byte + switch { + case IsWordChar(r1): + boundary = 1 + case r1 == '\n': + op |= EmptyBeginLine + case r1 < 0: + op |= EmptyBeginText | EmptyBeginLine + } + switch { + case IsWordChar(r2): + boundary ^= 1 + case r2 == '\n': + op |= EmptyEndLine + case r2 < 0: + op |= EmptyEndText | EmptyEndLine + } + if boundary != 0 { // IsWordChar(r1) != IsWordChar(r2) + op ^= (EmptyWordBoundary | EmptyNoWordBoundary) + } + return op +} + +// IsWordChar reports whether r is considered a “word character” +// during the evaluation of the \b and \B zero-width assertions. +// These assertions are ASCII-only: the word characters are [A-Za-z0-9_]. +func IsWordChar(r rune) bool { + // Test for lowercase letters first, as these occur more + // frequently than uppercase letters in common cases. + return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || r == '_' +} + +// An Inst is a single instruction in a regular expression program. +type Inst struct { + Op InstOp + Out uint32 // all but InstMatch, InstFail + Arg uint32 // InstAlt, InstAltMatch, InstCapture, InstEmptyWidth + Rune []rune +} + +func (p *Prog) String() string { + var b strings.Builder + dumpProg(&b, p) + return b.String() +} + +// skipNop follows any no-op or capturing instructions. 
+func (p *Prog) skipNop(pc uint32) *Inst { + i := &p.Inst[pc] + for i.Op == InstNop || i.Op == InstCapture { + i = &p.Inst[i.Out] + } + return i +} + +// op returns i.Op but merges all the Rune special cases into InstRune +func (i *Inst) op() InstOp { + op := i.Op + switch op { + case InstRune1, InstRuneAny, InstRuneAnyNotNL: + op = InstRune + } + return op +} + +// Prefix returns a literal string that all matches for the +// regexp must start with. Complete is true if the prefix +// is the entire match. +func (p *Prog) Prefix() (prefix string, complete bool) { + i := p.skipNop(uint32(p.Start)) + + // Avoid allocation of buffer if prefix is empty. + if i.op() != InstRune || len(i.Rune) != 1 { + return "", i.Op == InstMatch + } + + // Have prefix; gather characters. + var buf strings.Builder + for i.op() == InstRune && len(i.Rune) == 1 && Flags(i.Arg)&FoldCase == 0 && i.Rune[0] != utf8.RuneError { + buf.WriteRune(i.Rune[0]) + i = p.skipNop(i.Out) + } + return buf.String(), i.Op == InstMatch +} + +// StartCond returns the leading empty-width conditions that must +// be true in any match. It returns ^EmptyOp(0) if no matches are possible. +func (p *Prog) StartCond() EmptyOp { + var flag EmptyOp + pc := uint32(p.Start) + i := &p.Inst[pc] +Loop: + for { + switch i.Op { + case InstEmptyWidth: + flag |= EmptyOp(i.Arg) + case InstFail: + return ^EmptyOp(0) + case InstCapture, InstNop: + // skip + default: + break Loop + } + pc = i.Out + i = &p.Inst[pc] + } + return flag +} + +const noMatch = -1 + +// MatchRune reports whether the instruction matches (and consumes) r. +// It should only be called when i.Op == [InstRune]. +func (i *Inst) MatchRune(r rune) bool { + return i.MatchRunePos(r) != noMatch +} + +// MatchRunePos checks whether the instruction matches (and consumes) r. +// If so, MatchRunePos returns the index of the matching rune pair +// (or, when len(i.Rune) == 1, rune singleton). +// If not, MatchRunePos returns -1. +// MatchRunePos should only be called when i.Op == [InstRune]. +func (i *Inst) MatchRunePos(r rune) int { + rune := i.Rune + + switch len(rune) { + case 0: + return noMatch + + case 1: + // Special case: single-rune slice is from literal string, not char class. + r0 := rune[0] + if r == r0 { + return 0 + } + if Flags(i.Arg)&FoldCase != 0 { + for r1 := unicode.SimpleFold(r0); r1 != r0; r1 = unicode.SimpleFold(r1) { + if r == r1 { + return 0 + } + } + } + return noMatch + + case 2: + if r >= rune[0] && r <= rune[1] { + return 0 + } + return noMatch + + case 4, 6, 8: + // Linear search for a few pairs. + // Should handle ASCII well. + for j := 0; j < len(rune); j += 2 { + if r < rune[j] { + return noMatch + } + if r <= rune[j+1] { + return j / 2 + } + } + return noMatch + } + + // Otherwise binary search. + lo := 0 + hi := len(rune) / 2 + for lo < hi { + m := int(uint(lo+hi) >> 1) + if c := rune[2*m]; c <= r { + if r <= rune[2*m+1] { + return m + } + lo = m + 1 + } else { + hi = m + } + } + return noMatch +} + +// MatchEmptyWidth reports whether the instruction matches +// an empty string between the runes before and after. +// It should only be called when i.Op == [InstEmptyWidth]. 
+func (i *Inst) MatchEmptyWidth(before rune, after rune) bool {
+	switch EmptyOp(i.Arg) {
+	case EmptyBeginLine:
+		return before == '\n' || before == -1
+	case EmptyEndLine:
+		return after == '\n' || after == -1
+	case EmptyBeginText:
+		return before == -1
+	case EmptyEndText:
+		return after == -1
+	case EmptyWordBoundary:
+		return IsWordChar(before) != IsWordChar(after)
+	case EmptyNoWordBoundary:
+		return IsWordChar(before) == IsWordChar(after)
+	}
+	panic("unknown empty width arg")
+}
+
+func (i *Inst) String() string {
+	var b strings.Builder
+	dumpInst(&b, i)
+	return b.String()
+}
+
+func bw(b *strings.Builder, args ...string) {
+	for _, s := range args {
+		b.WriteString(s)
+	}
+}
+
+func dumpProg(b *strings.Builder, p *Prog) {
+	for j := range p.Inst {
+		i := &p.Inst[j]
+		pc := strconv.Itoa(j)
+		if len(pc) < 3 {
+			b.WriteString("   "[len(pc):])
+		}
+		if j == p.Start {
+			pc += "*"
+		}
+		bw(b, pc, "\t")
+		dumpInst(b, i)
+		bw(b, "\n")
+	}
+}
+
+func u32(i uint32) string {
+	return strconv.FormatUint(uint64(i), 10)
+}
+
+func dumpInst(b *strings.Builder, i *Inst) {
+	switch i.Op {
+	case InstAlt:
+		bw(b, "alt -> ", u32(i.Out), ", ", u32(i.Arg))
+	case InstAltMatch:
+		bw(b, "altmatch -> ", u32(i.Out), ", ", u32(i.Arg))
+	case InstCapture:
+		bw(b, "cap ", u32(i.Arg), " -> ", u32(i.Out))
+	case InstEmptyWidth:
+		bw(b, "empty ", u32(i.Arg), " -> ", u32(i.Out))
+	case InstMatch:
+		bw(b, "match")
+	case InstFail:
+		bw(b, "fail")
+	case InstNop:
+		bw(b, "nop -> ", u32(i.Out))
+	case InstRune:
+		if i.Rune == nil {
+			// shouldn't happen
+			bw(b, "rune <nil>")
+		}
+		bw(b, "rune ", strconv.QuoteToASCII(string(i.Rune)))
+		if Flags(i.Arg)&FoldCase != 0 {
+			bw(b, "/i")
+		}
+		bw(b, " -> ", u32(i.Out))
+	case InstRune1:
+		bw(b, "rune1 ", strconv.QuoteToASCII(string(i.Rune)), " -> ", u32(i.Out))
+	case InstRuneAny:
+		bw(b, "any -> ", u32(i.Out))
+	case InstRuneAnyNotNL:
+		bw(b, "anynotnl -> ", u32(i.Out))
+	}
+}
diff --git a/vendor/github.com/grafana/regexp/syntax/regexp.go b/vendor/github.com/grafana/regexp/syntax/regexp.go
new file mode 100644
index 00000000..8ad3653a
--- /dev/null
+++ b/vendor/github.com/grafana/regexp/syntax/regexp.go
@@ -0,0 +1,464 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+// Note to implementers:
+// In this package, re is always a *Regexp and r is always a rune.
+
+import (
+	"slices"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+// A Regexp is a node in a regular expression syntax tree.
+type Regexp struct {
+	Op       Op // operator
+	Flags    Flags
+	Sub      []*Regexp  // subexpressions, if any
+	Sub0     [1]*Regexp // storage for short Sub
+	Rune     []rune     // matched runes, for OpLiteral, OpCharClass
+	Rune0    [2]rune    // storage for short Rune
+	Min, Max int        // min, max for OpRepeat
+	Cap      int        // capturing index, for OpCapture
+	Name     string     // capturing name, for OpCapture
+}
+
+//go:generate stringer -type Op -trimprefix Op
+
+// An Op is a single regular expression operator.
+type Op uint8
+
+// Operators are listed in precedence order, tightest binding to weakest.
+// Character class operators are listed simplest to most complex
+// (OpLiteral, OpCharClass, OpAnyCharNotNL, OpAnyChar).
+
+const (
+	OpNoMatch        Op = 1 + iota // matches no strings
+	OpEmptyMatch                   // matches empty string
+	OpLiteral                      // matches Runes sequence
+	OpCharClass                    // matches Runes interpreted as range pair list
+	OpAnyCharNotNL                 // matches any character except newline
+	OpAnyChar                      // matches any character
+	OpBeginLine                    // matches empty string at beginning of line
+	OpEndLine                      // matches empty string at end of line
+	OpBeginText                    // matches empty string at beginning of text
+	OpEndText                      // matches empty string at end of text
+	OpWordBoundary                 // matches word boundary `\b`
+	OpNoWordBoundary               // matches word non-boundary `\B`
+	OpCapture                      // capturing subexpression with index Cap, optional name Name
+	OpStar                         // matches Sub[0] zero or more times
+	OpPlus                         // matches Sub[0] one or more times
+	OpQuest                        // matches Sub[0] zero or one times
+	OpRepeat                       // matches Sub[0] at least Min times, at most Max (Max == -1 is no limit)
+	OpConcat                       // matches concatenation of Subs
+	OpAlternate                    // matches alternation of Subs
+)
+
+const opPseudo Op = 128 // where pseudo-ops start
+
+// Equal reports whether x and y have identical structure.
+func (x *Regexp) Equal(y *Regexp) bool {
+	if x == nil || y == nil {
+		return x == y
+	}
+	if x.Op != y.Op {
+		return false
+	}
+	switch x.Op {
+	case OpEndText:
+		// The parse flags remember whether this is \z or \Z.
+		if x.Flags&WasDollar != y.Flags&WasDollar {
+			return false
+		}
+
+	case OpLiteral, OpCharClass:
+		return slices.Equal(x.Rune, y.Rune)
+
+	case OpAlternate, OpConcat:
+		return slices.EqualFunc(x.Sub, y.Sub, func(a, b *Regexp) bool { return a.Equal(b) })
+
+	case OpStar, OpPlus, OpQuest:
+		if x.Flags&NonGreedy != y.Flags&NonGreedy || !x.Sub[0].Equal(y.Sub[0]) {
+			return false
+		}
+
+	case OpRepeat:
+		if x.Flags&NonGreedy != y.Flags&NonGreedy || x.Min != y.Min || x.Max != y.Max || !x.Sub[0].Equal(y.Sub[0]) {
+			return false
+		}
+
+	case OpCapture:
+		if x.Cap != y.Cap || x.Name != y.Name || !x.Sub[0].Equal(y.Sub[0]) {
+			return false
+		}
+	}
+	return true
+}
+
+// printFlags is a bit set indicating which flags (including non-capturing parens) to print around a regexp.
+type printFlags uint8
+
+const (
+	flagI    printFlags = 1 << iota // (?i:
+	flagM                           // (?m:
+	flagS                           // (?s:
+	flagOff                         // )
+	flagPrec                        // (?: )
+	negShift = 5                    // flagI<<negShift is (?-i:
+)
+
+// calcFlags calculates the flags to print around each subexpression in re,
+// storing that information in (*flags)[sub] for each affected subexpression.
+// The first time an entry needs to be written to *flags, calcFlags allocates the map.
+// calcFlags also calculates the flags that must be active or can't be active
+// around re and returns those flags.
+func calcFlags(re *Regexp, flags *map[*Regexp]printFlags) (must, cant printFlags) {
+	switch re.Op {
+	default:
+		return 0, 0
+
+	case OpLiteral:
+		// If literal is fold-sensitive, return (flagI, 0) or (0, flagI)
+		// according to whether (?i) is active.
+		// If literal is not fold-sensitive, return 0, 0.
+		for _, r := range re.Rune {
+			if minFold <= r && r <= maxFold && unicode.SimpleFold(r) != r {
+				if re.Flags&FoldCase != 0 {
+					return flagI, 0
+				} else {
+					return 0, flagI
+				}
+			}
+		}
+		return 0, 0
+
+	case OpCharClass:
+		// If literal is fold-sensitive, return 0, flagI - (?i) has been compiled out.
+		// If literal is not fold-sensitive, return 0, 0.
+		for i := 0; i < len(re.Rune); i += 2 {
+			lo := max(minFold, re.Rune[i])
+			hi := min(maxFold, re.Rune[i+1])
+			for r := lo; r <= hi; r++ {
+				for f := unicode.SimpleFold(r); f != r; f = unicode.SimpleFold(f) {
+					if !(lo <= f && f <= hi) && !inCharClass(f, re.Rune) {
+						return 0, flagI
+					}
+				}
+			}
+		}
+		return 0, 0
+
+	case OpAnyCharNotNL: // (?-s).
+		return 0, flagS
+
+	case OpAnyChar: // (?s).
+		return flagS, 0
+
+	case OpBeginLine, OpEndLine: // (?m)^ (?m)$
+		return flagM, 0
+
+	case OpEndText:
+		if re.Flags&WasDollar != 0 { // (?-m)$
+			return 0, flagM
+		}
+		return 0, 0
+
+	case OpCapture, OpStar, OpPlus, OpQuest, OpRepeat:
+		return calcFlags(re.Sub[0], flags)
+
+	case OpConcat, OpAlternate:
+		// Gather the must and cant for each subexpression.
+		// When we find a conflicting subexpression, insert the necessary
+		// flags around the previously identified span and start over.
+		var must, cant printFlags
+		allCant := printFlags(0)
+		start := 0
+		last := 0
+		did := false
+		for i, sub := range re.Sub {
+			subMust, subCant := calcFlags(sub, flags)
+			if must&subCant != 0 || subMust&cant != 0 {
+				if must != 0 {
+					if *flags == nil {
+						*flags = make(map[*Regexp]printFlags)
+					}
+					(*flags)[re.Sub[start]] |= must
+					(*flags)[re.Sub[last]] |= flagOff | must<<negShift
+				}
+				must = 0
+				cant = 0
+				start = i
+				did = true
+			}
+			must |= subMust
+			cant |= subCant
+			allCant |= subCant
+			if subMust != 0 {
+				last = i
+			}
+			if must == 0 && start == i {
+				start++
+			}
+		}
+		if !did {
+			// No conflicts: pass the accumulated must and cant upward.
+			return must, cant
+		}
+		if must != 0 {
+			// Conflicts found; need to finish final span.
+			if *flags == nil {
+				*flags = make(map[*Regexp]printFlags)
+			}
+			(*flags)[re.Sub[start]] |= must
+			(*flags)[re.Sub[last]] |= flagOff | must<<negShift
+		}
+		return 0, allCant
+	}
+}
+
+// writeRegexp writes the Perl syntax for the regular expression re to b.
+func writeRegexp(b *strings.Builder, re *Regexp, f printFlags, flags map[*Regexp]printFlags) {
+	f |= flags[re]
+	if f&flagPrec != 0 && f&^(flagOff|flagPrec) != 0 && f&flagOff != 0 {
+		// flagPrec is redundant with other flags being added and terminated
+		f &^= flagPrec
+	}
+	if f&^(flagOff|flagPrec) != 0 {
+		b.WriteString(`(?`)
+		if f&flagI != 0 {
+			b.WriteString(`i`)
+		}
+		if f&flagM != 0 {
+			b.WriteString(`m`)
+		}
+		if f&flagS != 0 {
+			b.WriteString(`s`)
+		}
+		if f&((flagM|flagS)<<negShift) != 0 {
+			b.WriteString(`-`)
+			if f&(flagM<<negShift) != 0 {
+				b.WriteString(`m`)
+			}
+			if f&(flagS<<negShift) != 0 {
+				b.WriteString(`s`)
+			}
+		}
+		b.WriteString(`:`)
+	}
+	if f&flagOff != 0 {
+		defer b.WriteString(`)`)
+	}
+	if f&flagPrec != 0 {
+		b.WriteString(`(?:`)
+		defer b.WriteString(`)`)
+	}
+
+	switch re.Op {
+	default:
+		b.WriteString("<invalid op" + strconv.Itoa(int(re.Op)) + ">")
+	case OpNoMatch:
+		b.WriteString(`[^\x00-\x{10FFFF}]`)
+	case OpEmptyMatch:
+		b.WriteString(`(?:)`)
+	case OpLiteral:
+		for _, r := range re.Rune {
+			escape(b, r, false)
+		}
+	case OpCharClass:
+		if len(re.Rune)%2 != 0 {
+			b.WriteString(`[invalid char class]`)
+			break
+		}
+		b.WriteRune('[')
+		if len(re.Rune) == 0 {
+			b.WriteString(`^\x00-\x{10FFFF}`)
+		} else if re.Rune[0] == 0 && re.Rune[len(re.Rune)-1] == unicode.MaxRune && len(re.Rune) > 2 {
+			// Contains 0 and MaxRune. Probably a negated class.
+			// Print the gaps.
+ b.WriteRune('^') + for i := 1; i < len(re.Rune)-1; i += 2 { + lo, hi := re.Rune[i]+1, re.Rune[i+1]-1 + escape(b, lo, lo == '-') + if lo != hi { + if hi != lo+1 { + b.WriteRune('-') + } + escape(b, hi, hi == '-') + } + } + } else { + for i := 0; i < len(re.Rune); i += 2 { + lo, hi := re.Rune[i], re.Rune[i+1] + escape(b, lo, lo == '-') + if lo != hi { + if hi != lo+1 { + b.WriteRune('-') + } + escape(b, hi, hi == '-') + } + } + } + b.WriteRune(']') + case OpAnyCharNotNL, OpAnyChar: + b.WriteString(`.`) + case OpBeginLine: + b.WriteString(`^`) + case OpEndLine: + b.WriteString(`$`) + case OpBeginText: + b.WriteString(`\A`) + case OpEndText: + if re.Flags&WasDollar != 0 { + b.WriteString(`$`) + } else { + b.WriteString(`\z`) + } + case OpWordBoundary: + b.WriteString(`\b`) + case OpNoWordBoundary: + b.WriteString(`\B`) + case OpCapture: + if re.Name != "" { + b.WriteString(`(?P<`) + b.WriteString(re.Name) + b.WriteRune('>') + } else { + b.WriteRune('(') + } + if re.Sub[0].Op != OpEmptyMatch { + writeRegexp(b, re.Sub[0], flags[re.Sub[0]], flags) + } + b.WriteRune(')') + case OpStar, OpPlus, OpQuest, OpRepeat: + p := printFlags(0) + sub := re.Sub[0] + if sub.Op > OpCapture || sub.Op == OpLiteral && len(sub.Rune) > 1 { + p = flagPrec + } + writeRegexp(b, sub, p, flags) + + switch re.Op { + case OpStar: + b.WriteRune('*') + case OpPlus: + b.WriteRune('+') + case OpQuest: + b.WriteRune('?') + case OpRepeat: + b.WriteRune('{') + b.WriteString(strconv.Itoa(re.Min)) + if re.Max != re.Min { + b.WriteRune(',') + if re.Max >= 0 { + b.WriteString(strconv.Itoa(re.Max)) + } + } + b.WriteRune('}') + } + if re.Flags&NonGreedy != 0 { + b.WriteRune('?') + } + case OpConcat: + for _, sub := range re.Sub { + p := printFlags(0) + if sub.Op == OpAlternate { + p = flagPrec + } + writeRegexp(b, sub, p, flags) + } + case OpAlternate: + for i, sub := range re.Sub { + if i > 0 { + b.WriteRune('|') + } + writeRegexp(b, sub, 0, flags) + } + } +} + +func (re *Regexp) String() string { + var b strings.Builder + var flags map[*Regexp]printFlags + must, cant := calcFlags(re, &flags) + must |= (cant &^ flagI) << negShift + if must != 0 { + must |= flagOff + } + writeRegexp(&b, re, must, flags) + return b.String() +} + +const meta = `\.+*?()|[]{}^$` + +func escape(b *strings.Builder, r rune, force bool) { + if unicode.IsPrint(r) { + if strings.ContainsRune(meta, r) || force { + b.WriteRune('\\') + } + b.WriteRune(r) + return + } + + switch r { + case '\a': + b.WriteString(`\a`) + case '\f': + b.WriteString(`\f`) + case '\n': + b.WriteString(`\n`) + case '\r': + b.WriteString(`\r`) + case '\t': + b.WriteString(`\t`) + case '\v': + b.WriteString(`\v`) + default: + if r < 0x100 { + b.WriteString(`\x`) + s := strconv.FormatInt(int64(r), 16) + if len(s) == 1 { + b.WriteRune('0') + } + b.WriteString(s) + break + } + b.WriteString(`\x{`) + b.WriteString(strconv.FormatInt(int64(r), 16)) + b.WriteString(`}`) + } +} + +// MaxCap walks the regexp to find the maximum capture index. +func (re *Regexp) MaxCap() int { + m := 0 + if re.Op == OpCapture { + m = re.Cap + } + for _, sub := range re.Sub { + if n := sub.MaxCap(); m < n { + m = n + } + } + return m +} + +// CapNames walks the regexp to find the names of capturing groups. 
+func (re *Regexp) CapNames() []string { + names := make([]string, re.MaxCap()+1) + re.capNames(names) + return names +} + +func (re *Regexp) capNames(names []string) { + if re.Op == OpCapture { + names[re.Cap] = re.Name + } + for _, sub := range re.Sub { + sub.capNames(names) + } +} diff --git a/vendor/github.com/grafana/regexp/syntax/simplify.go b/vendor/github.com/grafana/regexp/syntax/simplify.go new file mode 100644 index 00000000..e4393251 --- /dev/null +++ b/vendor/github.com/grafana/regexp/syntax/simplify.go @@ -0,0 +1,151 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package syntax + +// Simplify returns a regexp equivalent to re but without counted repetitions +// and with various other simplifications, such as rewriting /(?:a+)+/ to /a+/. +// The resulting regexp will execute correctly but its string representation +// will not produce the same parse tree, because capturing parentheses +// may have been duplicated or removed. For example, the simplified form +// for /(x){1,2}/ is /(x)(x)?/ but both parentheses capture as $1. +// The returned regexp may share structure with or be the original. +func (re *Regexp) Simplify() *Regexp { + if re == nil { + return nil + } + switch re.Op { + case OpCapture, OpConcat, OpAlternate: + // Simplify children, building new Regexp if children change. + nre := re + for i, sub := range re.Sub { + nsub := sub.Simplify() + if nre == re && nsub != sub { + // Start a copy. + nre = new(Regexp) + *nre = *re + nre.Rune = nil + nre.Sub = append(nre.Sub0[:0], re.Sub[:i]...) + } + if nre != re { + nre.Sub = append(nre.Sub, nsub) + } + } + return nre + + case OpStar, OpPlus, OpQuest: + sub := re.Sub[0].Simplify() + return simplify1(re.Op, re.Flags, sub, re) + + case OpRepeat: + // Special special case: x{0} matches the empty string + // and doesn't even need to consider x. + if re.Min == 0 && re.Max == 0 { + return &Regexp{Op: OpEmptyMatch} + } + + // The fun begins. + sub := re.Sub[0].Simplify() + + // x{n,} means at least n matches of x. + if re.Max == -1 { + // Special case: x{0,} is x*. + if re.Min == 0 { + return simplify1(OpStar, re.Flags, sub, nil) + } + + // Special case: x{1,} is x+. + if re.Min == 1 { + return simplify1(OpPlus, re.Flags, sub, nil) + } + + // General case: x{4,} is xxxx+. + nre := &Regexp{Op: OpConcat} + nre.Sub = nre.Sub0[:0] + for i := 0; i < re.Min-1; i++ { + nre.Sub = append(nre.Sub, sub) + } + nre.Sub = append(nre.Sub, simplify1(OpPlus, re.Flags, sub, nil)) + return nre + } + + // Special case x{0} handled above. + + // Special case: x{1} is just x. + if re.Min == 1 && re.Max == 1 { + return sub + } + + // General case: x{n,m} means n copies of x and m copies of x? + // The machine will do less work if we nest the final m copies, + // so that x{2,5} = xx(x(x(x)?)?)? + + // Build leading prefix: xx. + var prefix *Regexp + if re.Min > 0 { + prefix = &Regexp{Op: OpConcat} + prefix.Sub = prefix.Sub0[:0] + for i := 0; i < re.Min; i++ { + prefix.Sub = append(prefix.Sub, sub) + } + } + + // Build and attach suffix: (x(x(x)?)?)? 
+ if re.Max > re.Min { + suffix := simplify1(OpQuest, re.Flags, sub, nil) + for i := re.Min + 1; i < re.Max; i++ { + nre2 := &Regexp{Op: OpConcat} + nre2.Sub = append(nre2.Sub0[:0], sub, suffix) + suffix = simplify1(OpQuest, re.Flags, nre2, nil) + } + if prefix == nil { + return suffix + } + prefix.Sub = append(prefix.Sub, suffix) + } + if prefix != nil { + return prefix + } + + // Some degenerate case like min > max or min < max < 0. + // Handle as impossible match. + return &Regexp{Op: OpNoMatch} + } + + return re +} + +// simplify1 implements Simplify for the unary OpStar, +// OpPlus, and OpQuest operators. It returns the simple regexp +// equivalent to +// +// Regexp{Op: op, Flags: flags, Sub: {sub}} +// +// under the assumption that sub is already simple, and +// without first allocating that structure. If the regexp +// to be returned turns out to be equivalent to re, simplify1 +// returns re instead. +// +// simplify1 is factored out of Simplify because the implementation +// for other operators generates these unary expressions. +// Letting them call simplify1 makes sure the expressions they +// generate are simple. +func simplify1(op Op, flags Flags, sub, re *Regexp) *Regexp { + // Special case: repeat the empty string as much as + // you want, but it's still the empty string. + if sub.Op == OpEmptyMatch { + return sub + } + // The operators are idempotent if the flags match. + if op == sub.Op && flags&NonGreedy == sub.Flags&NonGreedy { + return sub + } + if re != nil && re.Op == op && re.Flags&NonGreedy == flags&NonGreedy && sub == re.Sub[0] { + return re + } + + re = &Regexp{Op: op, Flags: flags} + re.Sub = append(re.Sub0[:0], sub) + return re +} diff --git a/vendor/github.com/hamba/avro/v2/.gitignore b/vendor/github.com/hamba/avro/v2/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/hamba/avro/v2/.golangci.yml b/vendor/github.com/hamba/avro/v2/.golangci.yml new file mode 100644 index 00000000..e3e2dc55 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/.golangci.yml @@ -0,0 +1,57 @@ +run: + tests: false + deadline: 5m + +linters-settings: + gofumpt: + extra-rules: true + +linters: + enable-all: true + disable: + - cyclop # duplicate of gocyclo + - deadcode # deprecated + - execinquery # deprecated + - exhaustivestruct # deprecated + - golint # deprecated + - gomnd # deprecated + - ifshort # deprecated + - interfacer # deprecated + - maligned # deprecated + - nosnakecase # deprecated + - scopelint # deprecated + - structcheck # deprecated + - varcheck # deprecated + - depguard + - err113 + - exhaustive + - exhaustruct + - forcetypeassert + - funlen + - gochecknoglobals + - gochecknoinits + - gocognit + - goconst + - gocyclo + - gosmopolitan + - inamedparam + - interfacebloat + - ireturn + - mnd + - nestif + - nlreturn + - nonamedreturns + - tagliatelle + - varnamelen + - wrapcheck + - wsl + +issues: + exclude-use-default: false + exclude: + - 'package-comments: should have a package comment' + - 'G103: Use of unsafe calls should be audited' + exclude-rules: + - path: (schema|protocol)\.go + linters: + - gosec \ No newline at end of file diff --git a/vendor/github.com/hamba/avro/v2/CODE_OF_CONDUCT.md b/vendor/github.com/hamba/avro/v2/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..f3e91296 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers 
pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at nick@wiersma.co.za. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/vendor/github.com/hamba/avro/v2/LICENCE b/vendor/github.com/hamba/avro/v2/LICENCE new file mode 100644 index 00000000..7ab55a48 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/LICENCE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Nicholas Wiersma + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/hamba/avro/v2/Makefile b/vendor/github.com/hamba/avro/v2/Makefile new file mode 100644 index 00000000..3de65faf --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/Makefile @@ -0,0 +1,27 @@ +# Format all files +fmt: + @echo "==> Formatting source" + @gofmt -s -w $(shell find . -type f -name '*.go' -not -path "./vendor/*") + @echo "==> Done" +.PHONY: fmt + +# Tidy the go.mod file +tidy: + @echo "==> Cleaning go.mod" + @go mod tidy + @echo "==> Done" +.PHONY: tidy + +# Run all tests +test: + @go test -cover -race ./... +.PHONY: test + +# Lint the project +lint: + @golangci-lint run ./... 
+.PHONY: lint + +# Run CI tasks +ci: lint test +.PHONY: ci diff --git a/vendor/github.com/hamba/avro/v2/README.md b/vendor/github.com/hamba/avro/v2/README.md new file mode 100644 index 00000000..90a86664 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/README.md @@ -0,0 +1,257 @@ + + + + Logo + + +[![Go Report Card](https://goreportcard.com/badge/github.com/hamba/avro/v2)](https://goreportcard.com/report/github.com/hamba/avro/v2) +[![Build Status](https://github.com/hamba/avro/actions/workflows/test.yml/badge.svg)](https://github.com/hamba/avro/actions) +[![Coverage Status](https://coveralls.io/repos/github/hamba/avro/badge.svg?branch=main)](https://coveralls.io/github/hamba/avro?branch=main) +[![Go Reference](https://pkg.go.dev/badge/github.com/hamba/avro/v2.svg)](https://pkg.go.dev/github.com/hamba/avro/v2) +[![GitHub release](https://img.shields.io/github/release/hamba/avro.svg)](https://github.com/hamba/avro/releases) +[![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://raw.githubusercontent.com/hamba/avro/master/LICENSE) + +A fast Go avro codec + +## Overview + +Install with: + +```shell +go get github.com/hamba/avro/v2 +``` + +**Note:** This project has renamed the default branch from `master` to `main`. You will need to update your local environment. + +## Usage + +```go +type SimpleRecord struct { + A int64 `avro:"a"` + B string `avro:"b"` +} + +schema, err := avro.Parse(`{ + "type": "record", + "name": "simple", + "namespace": "org.hamba.avro", + "fields" : [ + {"name": "a", "type": "long"}, + {"name": "b", "type": "string"} + ] +}`) +if err != nil { + log.Fatal(err) +} + +in := SimpleRecord{A: 27, B: "foo"} + +data, err := avro.Marshal(schema, in) +if err != nil { + log.Fatal(err) +} + +fmt.Println(data) +// Outputs: [54 6 102 111 111] + +out := SimpleRecord{} +err = avro.Unmarshal(schema, data, &out) +if err != nil { + log.Fatal(err) +} + +fmt.Println(out) +// Outputs: {27 foo} +``` + +More examples in the [godoc](https://pkg.go.dev/github.com/hamba/avro/v2). + +#### Types Conversions + +| Avro | Go Struct | Go Interface | +|-------------------------------|------------------------------------------------------------|--------------------------| +| `null` | `nil` | `nil` | +| `boolean` | `bool` | `bool` | +| `bytes` | `[]byte` | `[]byte` | +| `float` | `float32` | `float32` | +| `double` | `float64` | `float64` | +| `long` | `int`\*, `int64`, `uint32`\** | `int`, `int64`, `uint32` | +| `int` | `int`\*, `int32`, `int16`, `int8`, `uint8`\**, `uint16`\** | `int`, `uint8`, `uint16` | +| `fixed` | `uint64` | `uint64` | +| `string` | `string` | `string` | +| `array` | `[]T` | `[]any` | +| `enum` | `string` | `string` | +| `fixed` | `[n]byte` | `[n]byte` | +| `map` | `map[string]T{}` | `map[string]any` | +| `record` | `struct` | `map[string]any` | +| `union` | *see below* | *see below* | +| `int.date` | `time.Time` | `time.Time` | +| `int.time-millis` | `time.Duration` | `time.Duration` | +| `long.time-micros` | `time.Duration` | `time.Duration` | +| `long.timestamp-millis` | `time.Time` | `time.Time` | +| `long.timestamp-micros` | `time.Time` | `time.Time` | +| `long.local-timestamp-millis` | `time.Time` | `time.Time` | +| `long.local-timestamp-micros` | `time.Time` | `time.Time` | +| `bytes.decimal` | `*big.Rat` | `*big.Rat` | +| `fixed.decimal` | `*big.Rat` | `*big.Rat` | +| `string.uuid` | `string` | `string` | + +\* Please note that the size of the Go type `int` is platform dependent. 
Decoding an Avro `long` into a Go `int` is
+only allowed on 64-bit platforms and will result in an error on 32-bit platforms. Similarly, be careful when encoding a
+Go `int` using Avro `int` on a 64-bit platform, as that can result in an integer overflow causing misinterpretation of
+the data.
+
+\** Please note that when the Go type is an unsigned integer care must be taken to ensure that information is not lost
+when converting between the Avro type and Go type. For example, storing a *negative* number in Avro of `int = -100`
+would be interpreted as `uint16 = 65,436` in Go. Another example would be storing an Avro `int = 256` that is larger
+than the Go type can hold, wrapping around to `uint8 = 0`.
+
+##### Unions
+
+The following union types are accepted: `map[string]any`, `*T` and `any`.
+
+* **map[string]any:** If the union value is `nil`, a `nil` map will be en/decoded.
+When a non-`nil` union value is encountered, a single key is en/decoded. The key is the Avro
+type name, or the schema full name in the case of a named schema (enum, fixed or record).
+* ***T:** This is allowed in a "nullable" union. A nullable union is defined as a two-schema union,
+with one of the types being `null` (i.e. `["null", "string"]` or `["string", "null"]`); in this case
+a `*T` is allowed, with `T` matching the conversion table above (see the nullable-union sketch at the
+end of this section). In the case of a slice, the slice can be used directly.
+* **any:** An `interface` can be provided and the type or name resolved. Primitive types
+are pre-registered, but named types, maps and slices will need to be registered with the `Register` function.
+In the case of arrays and maps the enclosed schema type or name is appended to the type with a `:` separator,
+e.g. `"map:string"`. Behavior when a type cannot be resolved will depend on your chosen configuration options:
+  * !Config.UnionResolutionError && !Config.PartialUnionTypeResolution: the map type above is used.
+  * Config.UnionResolutionError && !Config.PartialUnionTypeResolution: an error is returned.
+  * !Config.UnionResolutionError && Config.PartialUnionTypeResolution: any registered type will get resolved while any unregistered type will fall back to the map type above.
+  * Config.UnionResolutionError && Config.PartialUnionTypeResolution: any registered type will get resolved while any unregistered type will return an error.
+
+##### TextMarshaler and TextUnmarshaler
+
+The interfaces `TextMarshaler` and `TextUnmarshaler` are supported for a `string` schema type. The object will
+be tested first for implementation of these interfaces, in the case of a `string` schema, before trying regular
+encoding and decoding.
+
+Enums may also implement `TextMarshaler` and `TextUnmarshaler`, and must resolve to valid symbols in the given enum schema.
+
+##### Identical Underlying Types
+
+One type can be [ConvertibleTo](https://go.dev/ref/spec#Conversions) another type if they have identical underlying types.
+A non-native type may be used if it is convertible to *time.Time*, *big.Rat* or *avro.LogicalDuration* for the
+corresponding *LogicalTypes*.
+
+Ex.: `type Timestamp time.Time`
+
+##### Untrusted Input With Bytes and Strings
+
+For security reasons, the configuration `Config.MaxByteSliceSize` restricts the maximum size of `bytes` and `string` types created
+by the `Reader`. The default maximum size is `1MiB` and is configurable. This is required to stop untrusted input from consuming all memory and
+crashing the application. Should this not be needed, setting a negative number will disable the behaviour.
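+
+As a concrete illustration of the nullable-union rule above, here is a minimal sketch. It uses only the
+`Parse`/`Marshal`/`Unmarshal` API shown earlier; the record and field names are illustrative, not part of
+this library:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hamba/avro/v2"
+)
+
+type Note struct {
+	// *string maps to the two-schema union ["null", "string"]:
+	// nil selects the null branch, a non-nil pointer the string branch.
+	Body *string `avro:"body"`
+}
+
+func main() {
+	schema, err := avro.Parse(`{
+		"type": "record",
+		"name": "note",
+		"namespace": "org.hamba.avro",
+		"fields": [
+			{"name": "body", "type": ["null", "string"]}
+		]
+	}`)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	body := "hello"
+	data, err := avro.Marshal(schema, Note{Body: &body})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	var out Note
+	if err := avro.Unmarshal(schema, data, &out); err != nil {
+		log.Fatal(err)
+	}
+	if out.Body != nil {
+		fmt.Println(*out.Body) // hello
+	}
+}
+```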
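+
+And a sketch of tightening the limits described in this section for untrusted input. `MaxByteSliceSize`,
+`UnionResolutionError` and `PartialUnionTypeResolution` are the options named above; that a `Config` is
+frozen into a reusable codec with `Freeze()` is assumed here from the package documentation rather than
+shown in this README:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/hamba/avro/v2"
+)
+
+func main() {
+	cfg := avro.Config{
+		// Cap bytes/string allocations at 64 KiB instead of the default 1 MiB.
+		MaxByteSliceSize: 64 * 1024,
+		// Fail loudly when a union type cannot be resolved.
+		UnionResolutionError: true,
+	}.Freeze()
+
+	schema, err := avro.Parse(`"string"`)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// 0x06 is the zigzag-encoded length 3, followed by the raw bytes of "foo".
+	var s string
+	if err := cfg.Unmarshal(schema, []byte{0x06, 'f', 'o', 'o'}, &s); err != nil {
+		log.Fatal(err)
+	}
+	log.Println(s) // foo
+}
+```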
+
+## Benchmark
+
+Benchmark source code can be found at: [https://github.com/nrwiersma/avro-benchmarks](https://github.com/nrwiersma/avro-benchmarks)
+
+```
+BenchmarkGoAvroDecode-8      788455   1505 ns/op    418 B/op   27 allocs/op
+BenchmarkGoAvroEncode-8      624343   1908 ns/op    806 B/op   63 allocs/op
+BenchmarkGoGenAvroDecode-8  1360375    876.4 ns/op  320 B/op   11 allocs/op
+BenchmarkGoGenAvroEncode-8  2801583    425.9 ns/op  240 B/op    3 allocs/op
+BenchmarkHambaDecode-8      5046832    238.7 ns/op   47 B/op    0 allocs/op
+BenchmarkHambaEncode-8      6017635    196.2 ns/op  112 B/op    1 allocs/op
+BenchmarkLinkedinDecode-8   1000000   1003 ns/op   1688 B/op   35 allocs/op
+BenchmarkLinkedinEncode-8   3170553    381.5 ns/op  248 B/op    5 allocs/op
+```
+
+Always benchmark with your own workload. The result depends heavily on the data input.
+
+## Go structs generation
+
+Go structs can be generated for you from the schema. The types generated follow the same logic as in [types conversions](#types-conversions).
+
+Install the struct generator with:
+
+```shell
+go install github.com/hamba/avro/v2/cmd/avrogen@<version>
+```
+
+Example usage assuming there's a valid schema in `in.avsc`:
+
+```shell
+avrogen -pkg avro -o bla.go -tags json:snake,yaml:upper-camel in.avsc
+```
+
+**Tip:** Omit `-o FILE` to dump the generated Go structs to stdout instead of a file.
+
+Check the options and usage with `-h`:
+
+```shell
+avrogen -h
+```
+
+Or use it as a library in internal commands via the `gen` package.
+
+## Avro schema validation
+
+### avrosv
+
+A small Avro schema validation command-line utility is also available. This simple tool leverages the
+schema parsing functionality of the library, showing validation errors or optionally dumping parsed
+schemas to the console. It can be used in CI/CD pipelines to validate schema changes in a repository.
+
+Install the Avro schema validator with:
+
+```shell
+go install github.com/hamba/avro/v2/cmd/avrosv@<version>
+```
+
+Example usage assuming there's a valid schema in `in.avsc` (exit status code is `0`):
+
+```shell
+avrosv in.avsc
+```
+
+An invalid schema will result in a diagnostic output and a non-zero exit status code:
+
+```shell
+avrosv bad-default-schema.avsc; echo $?
+Error: avro: invalid default for field someString. not a string
+2
+```
+
+Schemas referencing other schemas can also be validated by providing all of them (schemas are parsed in order):
+
+```shell
+avrosv base-schema.avsc schema-withref.avsc
+```
+
+Check the options and usage with `-h`:
+
+```shell
+avrosv -h
+```
+
+### Name Validation
+
+Avro names are validated according to the
+[Avro specification](https://avro.apache.org/docs/1.11.1/specification/#names).
+
+However, the official Java library does not validate said names accordingly, resulting in some files out in the wild
+having invalid names. Thus, this library has a configuration option to allow these invalid names to be parsed.
+
+```go
+avro.SkipNameValidation = true
+```
+
+Note that this variable is global, so ideally you'd need to unset it after you're done with the invalid schema.
+
+## Go Version Support
+
+This library supports the last two versions of Go. While the minimum Go version is
+not guaranteed to increase alongside Go, it may jump from time to time to support
+additional features. This will not be considered a breaking change.
+
+## Who uses hamba/avro?
+ +- [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go) +- [pulsar-client-go](https://github.com/apache/pulsar-client-go) + diff --git a/vendor/github.com/hamba/avro/v2/codec.go b/vendor/github.com/hamba/avro/v2/codec.go new file mode 100644 index 00000000..92c09917 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec.go @@ -0,0 +1,247 @@ +package avro + +import ( + "fmt" + "math/big" + "reflect" + "time" + "unsafe" + + "github.com/modern-go/reflect2" +) + +var ( + timeType = reflect.TypeOf(time.Time{}) + ratType = reflect.TypeOf(big.Rat{}) + durType = reflect.TypeOf(LogicalDuration{}) +) + +type null struct{} + +// ValDecoder represents an internal value decoder. +// +// You should never use ValDecoder directly. +type ValDecoder interface { + Decode(ptr unsafe.Pointer, r *Reader) +} + +// ValEncoder represents an internal value encoder. +// +// You should never use ValEncoder directly. +type ValEncoder interface { + Encode(ptr unsafe.Pointer, w *Writer) +} + +// ReadVal parses Avro value and stores the result in the value pointed to by obj. +func (r *Reader) ReadVal(schema Schema, obj any) { + decoder := r.cfg.getDecoderFromCache(schema.CacheFingerprint(), reflect2.RTypeOf(obj)) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + r.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = r.cfg.DecoderOf(schema, typ) + } + + ptr := reflect2.PtrOf(obj) + if ptr == nil { + r.ReportError("ReadVal", "can not read into nil pointer") + return + } + + decoder.Decode(ptr, r) +} + +// WriteVal writes the Avro encoding of obj. +func (w *Writer) WriteVal(schema Schema, val any) { + encoder := w.cfg.getEncoderFromCache(schema.Fingerprint(), reflect2.RTypeOf(val)) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = w.cfg.EncoderOf(schema, typ) + } + encoder.Encode(reflect2.PtrOf(val), w) +} + +func (c *frozenConfig) DecoderOf(schema Schema, typ reflect2.Type) ValDecoder { + rtype := typ.RType() + decoder := c.getDecoderFromCache(schema.CacheFingerprint(), rtype) + if decoder != nil { + return decoder + } + + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(newDecoderContext(c), schema, ptrType.Elem()) + c.addDecoderToCache(schema.CacheFingerprint(), rtype, decoder) + return decoder +} + +type deferDecoder struct { + decoder ValDecoder +} + +func (d *deferDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + d.decoder.Decode(ptr, r) +} + +type deferEncoder struct { + encoder ValEncoder +} + +func (d *deferEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + d.encoder.Encode(ptr, w) +} + +type decoderContext struct { + cfg *frozenConfig + decoders map[cacheKey]ValDecoder +} + +func newDecoderContext(cfg *frozenConfig) *decoderContext { + return &decoderContext{ + cfg: cfg, + decoders: make(map[cacheKey]ValDecoder), + } +} + +type encoderContext struct { + cfg *frozenConfig + encoders map[cacheKey]ValEncoder +} + +func newEncoderContext(cfg *frozenConfig) *encoderContext { + return &encoderContext{ + cfg: cfg, + encoders: make(map[cacheKey]ValEncoder), + } +} + +func decoderOfType(d *decoderContext, schema Schema, typ reflect2.Type) ValDecoder { + if dec := createDecoderOfMarshaler(schema, typ); dec != nil { + return dec + } + + // Handle eface (empty interface) case when it isn't a union + if typ.Kind() == reflect.Interface && schema.Type() != Union { + if _, ok := typ.(*reflect2.UnsafeIFaceType); !ok { + return newEfaceDecoder(d, schema) + } + } + + switch schema.Type() { + case String, 
Bytes, Int, Long, Float, Double, Boolean: + return createDecoderOfNative(schema.(*PrimitiveSchema), typ) + case Record: + key := cacheKey{fingerprint: schema.CacheFingerprint(), rtype: typ.RType()} + defDec := &deferDecoder{} + d.decoders[key] = defDec + defDec.decoder = createDecoderOfRecord(d, schema.(*RecordSchema), typ) + return defDec.decoder + case Ref: + key := cacheKey{fingerprint: schema.(*RefSchema).Schema().CacheFingerprint(), rtype: typ.RType()} + if dec, f := d.decoders[key]; f { + return dec + } + return decoderOfType(d, schema.(*RefSchema).Schema(), typ) + case Enum: + return createDecoderOfEnum(schema.(*EnumSchema), typ) + case Array: + return createDecoderOfArray(d, schema.(*ArraySchema), typ) + case Map: + return createDecoderOfMap(d, schema.(*MapSchema), typ) + case Union: + return createDecoderOfUnion(d, schema.(*UnionSchema), typ) + case Fixed: + return createDecoderOfFixed(schema.(*FixedSchema), typ) + default: + // It is impossible to get here with a valid schema + return &errorDecoder{err: fmt.Errorf("avro: schema type %s is unsupported", schema.Type())} + } +} + +func (c *frozenConfig) EncoderOf(schema Schema, typ reflect2.Type) ValEncoder { + if typ == nil { + typ = reflect2.TypeOf((*null)(nil)) + } + + rtype := typ.RType() + encoder := c.getEncoderFromCache(schema.Fingerprint(), rtype) + if encoder != nil { + return encoder + } + + encoder = encoderOfType(newEncoderContext(c), schema, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + c.addEncoderToCache(schema.Fingerprint(), rtype, encoder) + return encoder +} + +type onePtrEncoder struct { + enc ValEncoder +} + +func (e *onePtrEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + e.enc.Encode(noescape(unsafe.Pointer(&ptr)), w) +} + +func encoderOfType(e *encoderContext, schema Schema, typ reflect2.Type) ValEncoder { + if enc := createEncoderOfMarshaler(schema, typ); enc != nil { + return enc + } + + if typ.Kind() == reflect.Interface { + return &interfaceEncoder{schema: schema, typ: typ} + } + + switch schema.Type() { + case String, Bytes, Int, Long, Float, Double, Boolean, Null: + return createEncoderOfNative(schema, typ) + case Record: + key := cacheKey{fingerprint: schema.Fingerprint(), rtype: typ.RType()} + defEnc := &deferEncoder{} + e.encoders[key] = defEnc + defEnc.encoder = createEncoderOfRecord(e, schema.(*RecordSchema), typ) + return defEnc.encoder + case Ref: + key := cacheKey{fingerprint: schema.(*RefSchema).Schema().Fingerprint(), rtype: typ.RType()} + if enc, f := e.encoders[key]; f { + return enc + } + return encoderOfType(e, schema.(*RefSchema).Schema(), typ) + case Enum: + return createEncoderOfEnum(schema.(*EnumSchema), typ) + case Array: + return createEncoderOfArray(e, schema.(*ArraySchema), typ) + case Map: + return createEncoderOfMap(e, schema.(*MapSchema), typ) + case Union: + return createEncoderOfUnion(e, schema.(*UnionSchema), typ) + case Fixed: + return createEncoderOfFixed(schema.(*FixedSchema), typ) + default: + // It is impossible to get here with a valid schema + return &errorEncoder{err: fmt.Errorf("avro: schema type %s is unsupported", schema.Type())} + } +} + +type errorDecoder struct { + err error +} + +func (d *errorDecoder) Decode(_ unsafe.Pointer, r *Reader) { + if r.Error == nil { + r.Error = d.err + } +} + +type errorEncoder struct { + err error +} + +func (e *errorEncoder) Encode(_ unsafe.Pointer, w *Writer) { + if w.Error == nil { + w.Error = e.err + } +} diff --git a/vendor/github.com/hamba/avro/v2/codec_array.go 
b/vendor/github.com/hamba/avro/v2/codec_array.go new file mode 100644 index 00000000..0b412d93 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_array.go @@ -0,0 +1,119 @@ +package avro + +import ( + "errors" + "fmt" + "io" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func createDecoderOfArray(d *decoderContext, schema *ArraySchema, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice { + return decoderOfArray(d, schema, typ) + } + + return &errorDecoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s", typ.String(), schema.Type())} +} + +func createEncoderOfArray(e *encoderContext, schema *ArraySchema, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice { + return encoderOfArray(e, schema, typ) + } + + return &errorEncoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s", typ.String(), schema.Type())} +} + +func decoderOfArray(d *decoderContext, arr *ArraySchema, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(d, arr.Items(), sliceType.Elem()) + + return &arrayDecoder{typ: sliceType, decoder: decoder} +} + +type arrayDecoder struct { + typ *reflect2.UnsafeSliceType + decoder ValDecoder +} + +func (d *arrayDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + var size int + sliceType := d.typ + + for { + l, _ := r.ReadBlockHeader() + if l == 0 { + break + } + + start := size + size += int(l) + + if size > r.cfg.getMaxSliceAllocSize() { + r.ReportError("decode array", "size is greater than `Config.MaxSliceAllocSize`") + return + } + + sliceType.UnsafeGrow(ptr, size) + + for i := start; i < size; i++ { + elemPtr := sliceType.UnsafeGetIndex(ptr, i) + d.decoder.Decode(elemPtr, r) + if r.Error != nil { + r.Error = fmt.Errorf("reading %s: %w", d.typ.String(), r.Error) + return + } + } + } + + if r.Error != nil && !errors.Is(r.Error, io.EOF) { + r.Error = fmt.Errorf("%v: %w", d.typ, r.Error) + } +} + +func encoderOfArray(e *encoderContext, arr *ArraySchema, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(e, arr.Items(), sliceType.Elem()) + + return &arrayEncoder{ + blockLength: e.cfg.getBlockLength(), + typ: sliceType, + encoder: encoder, + } +} + +type arrayEncoder struct { + blockLength int + typ *reflect2.UnsafeSliceType + encoder ValEncoder +} + +func (e *arrayEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + blockLength := e.blockLength + length := e.typ.UnsafeLengthOf(ptr) + + for i := 0; i < length; i += blockLength { + w.WriteBlockCB(func(w *Writer) int64 { + count := int64(0) + for j := i; j < i+blockLength && j < length; j++ { + elemPtr := e.typ.UnsafeGetIndex(ptr, j) + e.encoder.Encode(elemPtr, w) + if w.Error != nil && !errors.Is(w.Error, io.EOF) { + w.Error = fmt.Errorf("%s: %w", e.typ.String(), w.Error) + return count + } + count++ + } + + return count + }) + } + + w.WriteBlockHeader(0, 0) + + if w.Error != nil && !errors.Is(w.Error, io.EOF) { + w.Error = fmt.Errorf("%v: %w", e.typ, w.Error) + } +} diff --git a/vendor/github.com/hamba/avro/v2/codec_default.go b/vendor/github.com/hamba/avro/v2/codec_default.go new file mode 100644 index 00000000..c42bdc3e --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_default.go @@ -0,0 +1,58 @@ +package avro + +import ( + "fmt" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func createDefaultDecoder(d *decoderContext, field *Field, typ reflect2.Type) ValDecoder { + cfg := d.cfg + fn := func(def any) ([]byte, error) { + defaultType := reflect2.TypeOf(def) + if 
defaultType == nil { + defaultType = reflect2.TypeOf((*null)(nil)) + } + defaultEncoder := encoderOfType(newEncoderContext(cfg), field.Type(), defaultType) + if defaultType.LikePtr() { + defaultEncoder = &onePtrEncoder{defaultEncoder} + } + w := cfg.borrowWriter() + defer cfg.returnWriter(w) + + defaultEncoder.Encode(reflect2.PtrOf(def), w) + if w.Error != nil { + return nil, w.Error + } + b := w.Buffer() + data := make([]byte, len(b)) + copy(data, b) + + return data, nil + } + + b, err := field.encodeDefault(fn) + if err != nil { + return &errorDecoder{err: fmt.Errorf("decode default: %w", err)} + } + return &defaultDecoder{ + data: b, + decoder: decoderOfType(d, field.Type(), typ), + } +} + +type defaultDecoder struct { + data []byte + decoder ValDecoder +} + +// Decode implements ValDecoder. +func (d *defaultDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + rr := r.cfg.borrowReader(d.data) + defer r.cfg.returnReader(rr) + + d.decoder.Decode(ptr, rr) +} + +var _ ValDecoder = &defaultDecoder{} diff --git a/vendor/github.com/hamba/avro/v2/codec_dynamic.go b/vendor/github.com/hamba/avro/v2/codec_dynamic.go new file mode 100644 index 00000000..f14a04ee --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_dynamic.go @@ -0,0 +1,59 @@ +package avro + +import ( + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +type efaceDecoder struct { + schema Schema + typ reflect2.Type + dec ValDecoder +} + +func newEfaceDecoder(d *decoderContext, schema Schema) *efaceDecoder { + typ, _ := genericReceiver(schema) + dec := decoderOfType(d, schema, typ) + + return &efaceDecoder{ + schema: schema, + typ: typ, + dec: dec, + } +} + +func (d *efaceDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + pObj := (*any)(ptr) + if *pObj == nil { + *pObj = genericDecode(d.typ, d.dec, r) + return + } + + typ := reflect2.TypeOf(*pObj) + if typ.Kind() != reflect.Ptr { + *pObj = genericDecode(d.typ, d.dec, r) + return + } + + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if reflect2.IsNil(*pObj) { + obj := ptrElemType.New() + r.ReadVal(d.schema, obj) + *pObj = obj + return + } + r.ReadVal(d.schema, *pObj) +} + +type interfaceEncoder struct { + schema Schema + typ reflect2.Type +} + +func (e *interfaceEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + obj := e.typ.UnsafeIndirect(ptr) + w.WriteVal(e.schema, obj) +} diff --git a/vendor/github.com/hamba/avro/v2/codec_enum.go b/vendor/github.com/hamba/avro/v2/codec_enum.go new file mode 100644 index 00000000..65ab4535 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_enum.go @@ -0,0 +1,131 @@ +package avro + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func createDecoderOfEnum(schema *EnumSchema, typ reflect2.Type) ValDecoder { + switch { + case typ.Kind() == reflect.String: + return &enumCodec{enum: schema} + case typ.Implements(textUnmarshalerType): + return &enumTextMarshalerCodec{typ: typ, enum: schema} + case reflect2.PtrTo(typ).Implements(textUnmarshalerType): + return &enumTextMarshalerCodec{typ: typ, enum: schema, ptr: true} + } + + return &errorDecoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s", typ.String(), schema.Type())} +} + +func createEncoderOfEnum(schema *EnumSchema, typ reflect2.Type) ValEncoder { + switch { + case typ.Kind() == reflect.String: + return &enumCodec{enum: schema} + case typ.Implements(textMarshalerType): + return &enumTextMarshalerCodec{typ: typ, enum: schema} + case reflect2.PtrTo(typ).Implements(textMarshalerType): + 
return &enumTextMarshalerCodec{typ: typ, enum: schema, ptr: true} + } + + return &errorEncoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s", typ.String(), schema.Type())} +} + +type enumCodec struct { + enum *EnumSchema +} + +func (c *enumCodec) Decode(ptr unsafe.Pointer, r *Reader) { + i := int(r.ReadInt()) + + symbol, ok := c.enum.Symbol(i) + if !ok { + r.ReportError("decode enum symbol", "unknown enum symbol") + return + } + + *((*string)(ptr)) = symbol +} + +func (c *enumCodec) Encode(ptr unsafe.Pointer, w *Writer) { + str := *((*string)(ptr)) + for i, sym := range c.enum.symbols { + if str != sym { + continue + } + + w.WriteInt(int32(i)) + return + } + + w.Error = fmt.Errorf("avro: unknown enum symbol: %s", str) +} + +type enumTextMarshalerCodec struct { + typ reflect2.Type + enum *EnumSchema + ptr bool +} + +func (c *enumTextMarshalerCodec) Decode(ptr unsafe.Pointer, r *Reader) { + i := int(r.ReadInt()) + + symbol, ok := c.enum.Symbol(i) + if !ok { + r.ReportError("decode enum symbol", "unknown enum symbol") + return + } + + var obj any + if c.ptr { + obj = c.typ.PackEFace(ptr) + } else { + obj = c.typ.UnsafeIndirect(ptr) + } + if reflect2.IsNil(obj) { + ptrType := c.typ.(*reflect2.UnsafePtrType) + newPtr := ptrType.Elem().UnsafeNew() + *((*unsafe.Pointer)(ptr)) = newPtr + obj = c.typ.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + if err := unmarshaler.UnmarshalText([]byte(symbol)); err != nil { + r.ReportError("decode enum text unmarshaler", err.Error()) + } +} + +func (c *enumTextMarshalerCodec) Encode(ptr unsafe.Pointer, w *Writer) { + var obj any + if c.ptr { + obj = c.typ.PackEFace(ptr) + } else { + obj = c.typ.UnsafeIndirect(ptr) + } + if c.typ.IsNullable() && reflect2.IsNil(obj) { + w.Error = errors.New("encoding nil enum text marshaler") + return + } + marshaler := (obj).(encoding.TextMarshaler) + b, err := marshaler.MarshalText() + if err != nil { + w.Error = err + return + } + + str := string(b) + for i, sym := range c.enum.symbols { + if str != sym { + continue + } + + w.WriteInt(int32(i)) + return + } + + w.Error = fmt.Errorf("avro: unknown enum symbol: %s", str) +} diff --git a/vendor/github.com/hamba/avro/v2/codec_fixed.go b/vendor/github.com/hamba/avro/v2/codec_fixed.go new file mode 100644 index 00000000..887defd1 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_fixed.go @@ -0,0 +1,192 @@ +package avro + +import ( + "encoding/binary" + "fmt" + "math/big" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func createDecoderOfFixed(fixed *FixedSchema, typ reflect2.Type) ValDecoder { + switch typ.Kind() { + case reflect.Array: + arrayType := typ.(reflect2.ArrayType) + if arrayType.Elem().Kind() != reflect.Uint8 || arrayType.Len() != fixed.Size() { + break + } + return &fixedCodec{arrayType: typ.(*reflect2.UnsafeArrayType)} + case reflect.Uint64: + if fixed.Size() != 8 { + break + } + + return &fixedUint64Codec{} + case reflect.Ptr: + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + + ls := fixed.Logical() + tpy1 := elemType.Type1() + if elemType.Kind() != reflect.Struct || !tpy1.ConvertibleTo(ratType) || ls == nil || + ls.Type() != Decimal { + break + } + dec := ls.(*DecimalLogicalSchema) + return &fixedDecimalCodec{prec: dec.Precision(), scale: dec.Scale(), size: fixed.Size()} + case reflect.Struct: + ls := fixed.Logical() + if ls == nil { + break + } + typ1 := typ.Type1() + if !typ1.ConvertibleTo(durType) || ls.Type() != Duration { + break + } + return &fixedDurationCodec{} + } + + 
return &errorDecoder{ + err: fmt.Errorf("avro: %s is unsupported for Avro %s, size=%d", typ.String(), fixed.Type(), fixed.Size()), + } +} + +func createEncoderOfFixed(fixed *FixedSchema, typ reflect2.Type) ValEncoder { + switch typ.Kind() { + case reflect.Array: + arrayType := typ.(reflect2.ArrayType) + if arrayType.Elem().Kind() != reflect.Uint8 || arrayType.Len() != fixed.Size() { + break + } + return &fixedCodec{arrayType: typ.(*reflect2.UnsafeArrayType)} + case reflect.Uint64: + if fixed.Size() != 8 { + break + } + + return &fixedUint64Codec{} + case reflect.Ptr: + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + + ls := fixed.Logical() + tpy1 := elemType.Type1() + if elemType.Kind() != reflect.Struct || !tpy1.ConvertibleTo(ratType) || ls == nil || + ls.Type() != Decimal { + break + } + dec := ls.(*DecimalLogicalSchema) + return &fixedDecimalCodec{prec: dec.Precision(), scale: dec.Scale(), size: fixed.Size()} + + case reflect.Struct: + ls := fixed.Logical() + if ls == nil { + break + } + typ1 := typ.Type1() + if typ1.ConvertibleTo(durType) && ls.Type() == Duration { + return &fixedDurationCodec{} + } + } + + return &errorEncoder{ + err: fmt.Errorf("avro: %s is unsupported for Avro %s, size=%d", typ.String(), fixed.Type(), fixed.Size()), + } +} + +type fixedUint64Codec [8]byte + +func (c *fixedUint64Codec) Decode(ptr unsafe.Pointer, r *Reader) { + buffer := c[:] + r.Read(buffer) + *(*uint64)(ptr) = binary.BigEndian.Uint64(buffer) +} + +func (c *fixedUint64Codec) Encode(ptr unsafe.Pointer, w *Writer) { + buffer := c[:] + binary.BigEndian.PutUint64(buffer, *(*uint64)(ptr)) + _, _ = w.Write(buffer) +} + +type fixedCodec struct { + arrayType *reflect2.UnsafeArrayType +} + +func (c *fixedCodec) Decode(ptr unsafe.Pointer, r *Reader) { + for i := 0; i < c.arrayType.Len(); i++ { + c.arrayType.UnsafeSetIndex(ptr, i, reflect2.PtrOf(r.readByte())) + } +} + +func (c *fixedCodec) Encode(ptr unsafe.Pointer, w *Writer) { + for i := 0; i < c.arrayType.Len(); i++ { + bytePtr := c.arrayType.UnsafeGetIndex(ptr, i) + w.writeByte(*((*byte)(bytePtr))) + } +} + +type fixedDecimalCodec struct { + prec int + scale int + size int +} + +func (c *fixedDecimalCodec) Decode(ptr unsafe.Pointer, r *Reader) { + b := make([]byte, c.size) + r.Read(b) + *((**big.Rat)(ptr)) = ratFromBytes(b, c.scale) +} + +func (c *fixedDecimalCodec) Encode(ptr unsafe.Pointer, w *Writer) { + r := *((**big.Rat)(ptr)) + scale := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(c.scale)), nil) + i := (&big.Int{}).Mul(r.Num(), scale) + i = i.Div(i, r.Denom()) + + var b []byte + switch i.Sign() { + case 0: + b = make([]byte, c.size) + + case 1: + b = i.Bytes() + if b[0]&0x80 > 0 { + b = append([]byte{0}, b...) 
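+			// The prepended zero byte keeps the big-endian two's-complement encoding positive.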
+ } + if len(b) < c.size { + padded := make([]byte, c.size) + copy(padded[c.size-len(b):], b) + b = padded + } + + case -1: + b = i.Add(i, (&big.Int{}).Lsh(one, uint(c.size*8))).Bytes() + } + + _, _ = w.Write(b) +} + +type fixedDurationCodec struct{} + +func (*fixedDurationCodec) Decode(ptr unsafe.Pointer, r *Reader) { + b := make([]byte, 12) + r.Read(b) + var duration LogicalDuration + duration.Months = binary.LittleEndian.Uint32(b[0:4]) + duration.Days = binary.LittleEndian.Uint32(b[4:8]) + duration.Milliseconds = binary.LittleEndian.Uint32(b[8:12]) + *((*LogicalDuration)(ptr)) = duration +} + +func (*fixedDurationCodec) Encode(ptr unsafe.Pointer, w *Writer) { + duration := (*LogicalDuration)(ptr) + b := make([]byte, 4) + binary.LittleEndian.PutUint32(b, duration.Months) + _, _ = w.Write(b) + binary.LittleEndian.PutUint32(b, duration.Days) + _, _ = w.Write(b) + binary.LittleEndian.PutUint32(b, duration.Milliseconds) + _, _ = w.Write(b) +} diff --git a/vendor/github.com/hamba/avro/v2/codec_generic.go b/vendor/github.com/hamba/avro/v2/codec_generic.go new file mode 100644 index 00000000..1384e9ee --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_generic.go @@ -0,0 +1,131 @@ +package avro + +import ( + "errors" + "math/big" + "time" + + "github.com/modern-go/reflect2" +) + +func genericDecode(typ reflect2.Type, dec ValDecoder, r *Reader) any { + ptr := typ.UnsafeNew() + dec.Decode(ptr, r) + if r.Error != nil { + return nil + } + + obj := typ.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + return nil + } + return obj +} + +func genericReceiver(schema Schema) (reflect2.Type, error) { + if schema.Type() == Ref { + schema = schema.(*RefSchema).Schema() + } + + var ls LogicalSchema + lts, ok := schema.(LogicalTypeSchema) + if ok { + ls = lts.Logical() + } + + schemaName := string(schema.Type()) + if ls != nil { + schemaName += "." 
+ string(ls.Type()) + } + + switch schema.Type() { + case Boolean: + var v bool + return reflect2.TypeOf(v), nil + case Int: + if ls != nil { + switch ls.Type() { + case Date: + var v time.Time + return reflect2.TypeOf(v), nil + + case TimeMillis: + var v time.Duration + return reflect2.TypeOf(v), nil + } + } + var v int + return reflect2.TypeOf(v), nil + case Long: + if ls != nil { + switch ls.Type() { + case TimeMicros: + var v time.Duration + return reflect2.TypeOf(v), nil + case TimestampMillis: + var v time.Time + return reflect2.TypeOf(v), nil + case TimestampMicros: + var v time.Time + return reflect2.TypeOf(v), nil + case LocalTimestampMillis: + var v time.Time + return reflect2.TypeOf(v), nil + case LocalTimestampMicros: + var v time.Time + return reflect2.TypeOf(v), nil + } + } + var v int64 + return reflect2.TypeOf(v), nil + case Float: + var v float32 + return reflect2.TypeOf(v), nil + case Double: + var v float64 + return reflect2.TypeOf(v), nil + case String: + var v string + return reflect2.TypeOf(v), nil + case Bytes: + if ls != nil && ls.Type() == Decimal { + var v *big.Rat + return reflect2.TypeOf(v), nil + } + var v []byte + return reflect2.TypeOf(v), nil + case Record: + var v map[string]any + return reflect2.TypeOf(v), nil + case Enum: + var v string + return reflect2.TypeOf(v), nil + case Array: + v := make([]any, 0) + return reflect2.TypeOf(v), nil + case Map: + var v map[string]any + return reflect2.TypeOf(v), nil + case Union: + var v map[string]any + return reflect2.TypeOf(v), nil + case Fixed: + fixed := schema.(*FixedSchema) + ls := fixed.Logical() + if ls != nil { + switch ls.Type() { + case Duration: + var v LogicalDuration + return reflect2.TypeOf(v), nil + case Decimal: + var v *big.Rat + return reflect2.TypeOf(v), nil + } + } + v := byteSliceToArray(make([]byte, fixed.Size()), fixed.Size()) + return reflect2.TypeOf(v), nil + default: + // This should not be possible. 
+ return nil, errors.New("dynamic receiver not found for schema " + schemaName) + } +} diff --git a/vendor/github.com/hamba/avro/v2/codec_map.go b/vendor/github.com/hamba/avro/v2/codec_map.go new file mode 100644 index 00000000..ceefa008 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_map.go @@ -0,0 +1,246 @@ +package avro + +import ( + "encoding" + "errors" + "fmt" + "io" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func createDecoderOfMap(d *decoderContext, schema *MapSchema, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Map { + keyType := typ.(reflect2.MapType).Key() + switch { + case keyType.Kind() == reflect.String: + return decoderOfMap(d, schema, typ) + case keyType.Implements(textUnmarshalerType): + return decoderOfMapUnmarshaler(d, schema, typ) + } + } + + return &errorDecoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s", typ.String(), schema.Type())} +} + +func createEncoderOfMap(e *encoderContext, schema *MapSchema, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Map { + keyType := typ.(reflect2.MapType).Key() + switch { + case keyType.Kind() == reflect.String: + return encoderOfMap(e, schema, typ) + case keyType.Implements(textMarshalerType): + return encoderOfMapMarshaler(e, schema, typ) + } + } + + return &errorEncoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s", typ.String(), schema.Type())} +} + +func decoderOfMap(d *decoderContext, m *MapSchema, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + decoder := decoderOfType(d, m.Values(), mapType.Elem()) + + return &mapDecoder{ + mapType: mapType, + elemType: mapType.Elem(), + decoder: decoder, + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + elemType reflect2.Type + decoder ValDecoder +} + +func (d *mapDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + if d.mapType.UnsafeIsNil(ptr) { + d.mapType.UnsafeSet(ptr, d.mapType.UnsafeMakeMap(0)) + } + + for { + l, _ := r.ReadBlockHeader() + if l == 0 { + break + } + + for i := int64(0); i < l; i++ { + keyPtr := reflect2.PtrOf(r.ReadString()) + elemPtr := d.elemType.UnsafeNew() + d.decoder.Decode(elemPtr, r) + if r.Error != nil { + r.Error = fmt.Errorf("reading map[string]%s: %w", d.elemType.String(), r.Error) + return + } + + d.mapType.UnsafeSetIndex(ptr, keyPtr, elemPtr) + } + } + + if r.Error != nil && !errors.Is(r.Error, io.EOF) { + r.Error = fmt.Errorf("%v: %w", d.mapType, r.Error) + } +} + +func decoderOfMapUnmarshaler(d *decoderContext, m *MapSchema, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + decoder := decoderOfType(d, m.Values(), mapType.Elem()) + + return &mapDecoderUnmarshaler{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + decoder: decoder, + } +} + +type mapDecoderUnmarshaler struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + decoder ValDecoder +} + +func (d *mapDecoderUnmarshaler) Decode(ptr unsafe.Pointer, r *Reader) { + if d.mapType.UnsafeIsNil(ptr) { + d.mapType.UnsafeSet(ptr, d.mapType.UnsafeMakeMap(0)) + } + + for { + l, _ := r.ReadBlockHeader() + if l == 0 { + break + } + + for i := int64(0); i < l; i++ { + keyPtr := d.keyType.UnsafeNew() + keyObj := d.keyType.UnsafeIndirect(keyPtr) + if reflect2.IsNil(keyObj) { + ptrType := d.keyType.(*reflect2.UnsafePtrType) + newPtr := ptrType.Elem().UnsafeNew() + *((*unsafe.Pointer)(keyPtr)) = newPtr + keyObj = d.keyType.UnsafeIndirect(keyPtr) + } + unmarshaler := keyObj.(encoding.TextUnmarshaler) + err 
:= unmarshaler.UnmarshalText([]byte(r.ReadString())) + if err != nil { + r.ReportError("mapDecoderUnmarshaler", err.Error()) + return + } + + elemPtr := d.elemType.UnsafeNew() + d.decoder.Decode(elemPtr, r) + + d.mapType.UnsafeSetIndex(ptr, keyPtr, elemPtr) + } + } + + if r.Error != nil && !errors.Is(r.Error, io.EOF) { + r.Error = fmt.Errorf("%v: %w", d.mapType, r.Error) + } +} + +func encoderOfMap(e *encoderContext, m *MapSchema, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + encoder := encoderOfType(e, m.Values(), mapType.Elem()) + + return &mapEncoder{ + blockLength: e.cfg.getBlockLength(), + mapType: mapType, + encoder: encoder, + } +} + +type mapEncoder struct { + blockLength int + mapType *reflect2.UnsafeMapType + encoder ValEncoder +} + +func (e *mapEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + blockLength := e.blockLength + + iter := e.mapType.UnsafeIterate(ptr) + + for { + wrote := w.WriteBlockCB(func(w *Writer) int64 { + var i int + for i = 0; iter.HasNext() && i < blockLength; i++ { + keyPtr, elemPtr := iter.UnsafeNext() + w.WriteString(*((*string)(keyPtr))) + e.encoder.Encode(elemPtr, w) + } + + return int64(i) + }) + + if wrote == 0 { + break + } + } + + if w.Error != nil && !errors.Is(w.Error, io.EOF) { + w.Error = fmt.Errorf("%v: %w", e.mapType, w.Error) + } +} + +func encoderOfMapMarshaler(e *encoderContext, m *MapSchema, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + encoder := encoderOfType(e, m.Values(), mapType.Elem()) + + return &mapEncoderMarshaller{ + blockLength: e.cfg.getBlockLength(), + mapType: mapType, + keyType: mapType.Key(), + encoder: encoder, + } +} + +type mapEncoderMarshaller struct { + blockLength int + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + encoder ValEncoder +} + +func (e *mapEncoderMarshaller) Encode(ptr unsafe.Pointer, w *Writer) { + blockLength := e.blockLength + + iter := e.mapType.UnsafeIterate(ptr) + + for { + wrote := w.WriteBlockCB(func(w *Writer) int64 { + var i int + for i = 0; iter.HasNext() && i < blockLength; i++ { + keyPtr, elemPtr := iter.UnsafeNext() + + obj := e.keyType.UnsafeIndirect(keyPtr) + if e.keyType.IsNullable() && reflect2.IsNil(obj) { + w.Error = errors.New("avro: mapEncoderMarshaller: encoding nil TextMarshaller") + return int64(0) + } + marshaler := (obj).(encoding.TextMarshaler) + b, err := marshaler.MarshalText() + if err != nil { + w.Error = err + return int64(0) + } + w.WriteString(string(b)) + + e.encoder.Encode(elemPtr, w) + } + return int64(i) + }) + + if wrote == 0 { + break + } + } + + if w.Error != nil && !errors.Is(w.Error, io.EOF) { + w.Error = fmt.Errorf("%v: %w", e.mapType, w.Error) + } +} diff --git a/vendor/github.com/hamba/avro/v2/codec_marshaler.go b/vendor/github.com/hamba/avro/v2/codec_marshaler.go new file mode 100644 index 00000000..d783d177 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_marshaler.go @@ -0,0 +1,70 @@ +package avro + +import ( + "encoding" + "unsafe" + + "github.com/modern-go/reflect2" +) + +var ( + textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() +) + +func createDecoderOfMarshaler(schema Schema, typ reflect2.Type) ValDecoder { + if typ.Implements(textUnmarshalerType) && schema.Type() == String { + return &textMarshalerCodec{typ} + } + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(textUnmarshalerType) && schema.Type() == String { + return &referenceDecoder{ + 
&textMarshalerCodec{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(schema Schema, typ reflect2.Type) ValEncoder { + if typ.Implements(textMarshalerType) && schema.Type() == String { + return &textMarshalerCodec{ + typ: typ, + } + } + return nil +} + +type textMarshalerCodec struct { + typ reflect2.Type +} + +func (c textMarshalerCodec) Decode(ptr unsafe.Pointer, r *Reader) { + obj := c.typ.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := c.typ.(*reflect2.UnsafePtrType) + newPtr := ptrType.Elem().UnsafeNew() + *((*unsafe.Pointer)(ptr)) = newPtr + obj = c.typ.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + b := r.ReadBytes() + err := unmarshaler.UnmarshalText(b) + if err != nil { + r.ReportError("textMarshalerCodec", err.Error()) + } +} + +func (c textMarshalerCodec) Encode(ptr unsafe.Pointer, w *Writer) { + obj := c.typ.UnsafeIndirect(ptr) + if c.typ.IsNullable() && reflect2.IsNil(obj) { + w.WriteBytes(nil) + return + } + marshaler := (obj).(encoding.TextMarshaler) + b, err := marshaler.MarshalText() + if err != nil { + w.Error = err + return + } + w.WriteBytes(b) +} diff --git a/vendor/github.com/hamba/avro/v2/codec_native.go b/vendor/github.com/hamba/avro/v2/codec_native.go new file mode 100644 index 00000000..e4c5a412 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_native.go @@ -0,0 +1,666 @@ +package avro + +import ( + "fmt" + "math/big" + "reflect" + "strconv" + "time" + "unsafe" + + "github.com/modern-go/reflect2" +) + +//nolint:maintidx // Splitting this would not make it simpler. +func createDecoderOfNative(schema *PrimitiveSchema, typ reflect2.Type) ValDecoder { + resolved := schema.encodedType != "" + switch typ.Kind() { + case reflect.Bool: + if schema.Type() != Boolean { + break + } + return &boolCodec{} + + case reflect.Int: + switch schema.Type() { + case Int: + return &intCodec[int]{} + case Long: + if strconv.IntSize == 64 { + // allow decoding into int when it's 64-bit + return &longCodec[int]{} + } + } + + case reflect.Int8: + if schema.Type() != Int { + break + } + return &intCodec[int8]{} + + case reflect.Uint8: + if schema.Type() != Int { + break + } + return &intCodec[uint8]{} + + case reflect.Int16: + if schema.Type() != Int { + break + } + return &intCodec[int16]{} + + case reflect.Uint16: + if schema.Type() != Int { + break + } + return &intCodec[uint16]{} + + case reflect.Int32: + if schema.Type() != Int { + break + } + return &intCodec[int32]{} + + case reflect.Uint32: + if schema.Type() != Long { + break + } + if resolved { + return &longConvCodec[uint32]{convert: createLongConverter(schema.encodedType)} + } + return &longCodec[uint32]{} + + case reflect.Int64: + st := schema.Type() + lt := getLogicalType(schema) + switch { + case st == Int && lt == TimeMillis: // time.Duration + return &timeMillisCodec{} + + case st == Long && lt == TimeMicros: // time.Duration + return &timeMicrosCodec{ + convert: createLongConverter(schema.encodedType), + } + + case st == Long && lt == "": + if resolved { + return &longConvCodec[int64]{convert: createLongConverter(schema.encodedType)} + } + return &longCodec[int64]{} + + case lt != "": + return &errorDecoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s and logicalType %s", + typ.String(), schema.Type(), lt)} + + default: + break + } + + case reflect.Float32: + if schema.Type() != Float { + break + } + if resolved { + return &float32ConvCodec{convert: createFloatConverter(schema.encodedType)} + } + return &float32Codec{} + + case reflect.Float64: + if 
schema.Type() != Double {
+			break
+		}
+		if resolved {
+			return &float64ConvCodec{convert: createDoubleConverter(schema.encodedType)}
+		}
+		return &float64Codec{}
+
+	case reflect.String:
+		if schema.Type() != String {
+			break
+		}
+		return &stringCodec{}
+
+	case reflect.Slice:
+		if typ.(reflect2.SliceType).Elem().Kind() != reflect.Uint8 || schema.Type() != Bytes {
+			break
+		}
+		return &bytesCodec{sliceType: typ.(*reflect2.UnsafeSliceType)}
+
+	case reflect.Struct:
+		st := schema.Type()
+		ls := getLogicalSchema(schema)
+		lt := getLogicalType(schema)
+		isTime := typ.Type1().ConvertibleTo(timeType)
+		switch {
+		case isTime && st == Int && lt == Date:
+			return &dateCodec{}
+		case isTime && st == Long && lt == TimestampMillis:
+			return &timestampMillisCodec{
+				convert: createLongConverter(schema.encodedType),
+			}
+		case isTime && st == Long && lt == TimestampMicros:
+			return &timestampMicrosCodec{
+				convert: createLongConverter(schema.encodedType),
+			}
+		case isTime && st == Long && lt == LocalTimestampMillis:
+			return &timestampMillisCodec{
+				local:   true,
+				convert: createLongConverter(schema.encodedType),
+			}
+		case isTime && st == Long && lt == LocalTimestampMicros:
+			return &timestampMicrosCodec{
+				local:   true,
+				convert: createLongConverter(schema.encodedType),
+			}
+		case typ.Type1().ConvertibleTo(ratType) && st == Bytes && lt == Decimal:
+			dec := ls.(*DecimalLogicalSchema)
+			return &bytesDecimalCodec{prec: dec.Precision(), scale: dec.Scale()}
+
+		default:
+			break
+		}
+	case reflect.Ptr:
+		ptrType := typ.(*reflect2.UnsafePtrType)
+		elemType := ptrType.Elem()
+		tpy1 := elemType.Type1()
+		ls := getLogicalSchema(schema)
+		if ls == nil {
+			break
+		}
+		if !tpy1.ConvertibleTo(ratType) || schema.Type() != Bytes || ls.Type() != Decimal {
+			break
+		}
+		dec := ls.(*DecimalLogicalSchema)
+
+		return &bytesDecimalPtrCodec{prec: dec.Precision(), scale: dec.Scale()}
+	}
+
+	return &errorDecoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s", typ.String(), schema.Type())}
+}
+
+//nolint:maintidx // Splitting this would not make it simpler.
+func createEncoderOfNative(schema Schema, typ reflect2.Type) ValEncoder {
+	switch typ.Kind() {
+	case reflect.Bool:
+		if schema.Type() != Boolean {
+			break
+		}
+		return &boolCodec{}
+
+	case reflect.Int:
+		switch schema.Type() {
+		case Int:
+			return &intCodec[int]{}
+		case Long:
+			return &longCodec[int]{}
+		}
+
+	case reflect.Int8:
+		if schema.Type() != Int {
+			break
+		}
+		return &intCodec[int8]{}
+
+	case reflect.Uint8:
+		if schema.Type() != Int {
+			break
+		}
+		return &intCodec[uint8]{}
+
+	case reflect.Int16:
+		if schema.Type() != Int {
+			break
+		}
+		return &intCodec[int16]{}
+
+	case reflect.Uint16:
+		if schema.Type() != Int {
+			break
+		}
+		return &intCodec[uint16]{}
+
+	case reflect.Int32:
+		switch schema.Type() {
+		case Long:
+			return &longCodec[int32]{}
+
+		case Int:
+			return &intCodec[int32]{}
+		}
+
+	case reflect.Uint32:
+		if schema.Type() != Long {
+			break
+		}
+		return &longCodec[uint32]{}
+
+	case reflect.Int64:
+		st := schema.Type()
+		lt := getLogicalType(schema)
+		switch {
+		case st == Int && lt == TimeMillis: // time.Duration
+			return &timeMillisCodec{}
+
+		case st == Long && lt == TimeMicros: // time.Duration
+			return &timeMicrosCodec{}
+
+		case st == Long && lt == "":
+			return &longCodec[int64]{}
+
+		case lt != "":
+			return &errorEncoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s and logicalType %s",
+				typ.String(), schema.Type(), lt)}
+
+		default:
+			break
+		}
+
+	case reflect.Float32:
+		switch schema.Type() {
+		case Double:
+			return &float32DoubleCodec{}
+		case Float:
+			return &float32Codec{}
+		}
+
+	case reflect.Float64:
+		if schema.Type() != Double {
+			break
+		}
+		return &float64Codec{}
+
+	case reflect.String:
+		if schema.Type() != String {
+			break
+		}
+		return &stringCodec{}
+
+	case reflect.Slice:
+		if typ.(reflect2.SliceType).Elem().Kind() != reflect.Uint8 || schema.Type() != Bytes {
+			break
+		}
+		return &bytesCodec{sliceType: typ.(*reflect2.UnsafeSliceType)}
+
+	case reflect.Struct:
+		st := schema.Type()
+		lt := getLogicalType(schema)
+		isTime := typ.Type1().ConvertibleTo(timeType)
+		switch {
+		case isTime && st == Int && lt == Date:
+			return &dateCodec{}
+		case isTime && st == Long && lt == TimestampMillis:
+			return &timestampMillisCodec{}
+		case isTime && st == Long && lt == TimestampMicros:
+			return &timestampMicrosCodec{}
+		case isTime && st == Long && lt == LocalTimestampMillis:
+			return &timestampMillisCodec{local: true}
+		case isTime && st == Long && lt == LocalTimestampMicros:
+			return &timestampMicrosCodec{local: true}
+		case typ.Type1().ConvertibleTo(ratType) && st == Bytes && lt == Decimal:
+			ls := getLogicalSchema(schema)
+			dec := ls.(*DecimalLogicalSchema)
+			return &bytesDecimalCodec{prec: dec.Precision(), scale: dec.Scale()}
+		default:
+			break
+		}
+
+	case reflect.Ptr:
+		ptrType := typ.(*reflect2.UnsafePtrType)
+		elemType := ptrType.Elem()
+		tpy1 := elemType.Type1()
+		ls := getLogicalSchema(schema)
+		if ls == nil {
+			break
+		}
+		if !tpy1.ConvertibleTo(ratType) || schema.Type() != Bytes || ls.Type() != Decimal {
+			break
+		}
+		dec := ls.(*DecimalLogicalSchema)
+
+		return &bytesDecimalPtrCodec{prec: dec.Precision(), scale: dec.Scale()}
+	}
+
+	if schema.Type() == Null {
+		return &nullCodec{}
+	}
+
+	return &errorEncoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s", typ.String(), schema.Type())}
+}
+
+func getLogicalSchema(schema Schema) LogicalSchema {
+	lts, ok := schema.(LogicalTypeSchema)
+	if !ok {
+		return nil
+	}
+
+	return lts.Logical()
+}
+
+func getLogicalType(schema Schema) LogicalType {
+	ls := getLogicalSchema(schema)
+	if ls == nil {
+		return
"" + } + + return ls.Type() +} + +type nullCodec struct{} + +func (*nullCodec) Encode(unsafe.Pointer, *Writer) {} + +type boolCodec struct{} + +func (*boolCodec) Decode(ptr unsafe.Pointer, r *Reader) { + *((*bool)(ptr)) = r.ReadBool() +} + +func (*boolCodec) Encode(ptr unsafe.Pointer, w *Writer) { + w.WriteBool(*((*bool)(ptr))) +} + +type smallInt interface { + ~int | ~int8 | ~int16 | ~int32 | ~uint | ~uint8 | ~uint16 +} + +type intCodec[T smallInt] struct{} + +func (*intCodec[T]) Decode(ptr unsafe.Pointer, r *Reader) { + *((*T)(ptr)) = T(r.ReadInt()) +} + +func (*intCodec[T]) Encode(ptr unsafe.Pointer, w *Writer) { + w.WriteInt(int32(*((*T)(ptr)))) +} + +type largeInt interface { + ~int | ~int32 | ~uint32 | int64 +} + +type longCodec[T largeInt] struct{} + +func (c *longCodec[T]) Decode(ptr unsafe.Pointer, r *Reader) { + *((*T)(ptr)) = T(r.ReadLong()) +} + +func (*longCodec[T]) Encode(ptr unsafe.Pointer, w *Writer) { + w.WriteLong(int64(*((*T)(ptr)))) +} + +type longConvCodec[T largeInt] struct { + convert func(*Reader) int64 +} + +func (c *longConvCodec[T]) Decode(ptr unsafe.Pointer, r *Reader) { + *((*T)(ptr)) = T(c.convert(r)) +} + +type float32Codec struct{} + +func (c *float32Codec) Decode(ptr unsafe.Pointer, r *Reader) { + *((*float32)(ptr)) = r.ReadFloat() +} + +func (*float32Codec) Encode(ptr unsafe.Pointer, w *Writer) { + w.WriteFloat(*((*float32)(ptr))) +} + +type float32ConvCodec struct { + convert func(*Reader) float32 +} + +func (c *float32ConvCodec) Decode(ptr unsafe.Pointer, r *Reader) { + *((*float32)(ptr)) = c.convert(r) +} + +type float32DoubleCodec struct{} + +func (*float32DoubleCodec) Encode(ptr unsafe.Pointer, w *Writer) { + w.WriteDouble(float64(*((*float32)(ptr)))) +} + +type float64Codec struct{} + +func (c *float64Codec) Decode(ptr unsafe.Pointer, r *Reader) { + *((*float64)(ptr)) = r.ReadDouble() +} + +func (*float64Codec) Encode(ptr unsafe.Pointer, w *Writer) { + w.WriteDouble(*((*float64)(ptr))) +} + +type float64ConvCodec struct { + convert func(*Reader) float64 +} + +func (c *float64ConvCodec) Decode(ptr unsafe.Pointer, r *Reader) { + *((*float64)(ptr)) = c.convert(r) +} + +type stringCodec struct{} + +func (c *stringCodec) Decode(ptr unsafe.Pointer, r *Reader) { + *((*string)(ptr)) = r.ReadString() +} + +func (*stringCodec) Encode(ptr unsafe.Pointer, w *Writer) { + w.WriteString(*((*string)(ptr))) +} + +type bytesCodec struct { + sliceType *reflect2.UnsafeSliceType +} + +func (c *bytesCodec) Decode(ptr unsafe.Pointer, r *Reader) { + b := r.ReadBytes() + c.sliceType.UnsafeSet(ptr, reflect2.PtrOf(b)) +} + +func (c *bytesCodec) Encode(ptr unsafe.Pointer, w *Writer) { + w.WriteBytes(*((*[]byte)(ptr))) +} + +type dateCodec struct{} + +func (c *dateCodec) Decode(ptr unsafe.Pointer, r *Reader) { + i := r.ReadInt() + sec := int64(i) * int64(24*time.Hour/time.Second) + *((*time.Time)(ptr)) = time.Unix(sec, 0).UTC() +} + +func (c *dateCodec) Encode(ptr unsafe.Pointer, w *Writer) { + t := *((*time.Time)(ptr)) + days := t.Unix() / int64(24*time.Hour/time.Second) + w.WriteInt(int32(days)) +} + +type timestampMillisCodec struct { + local bool + convert func(*Reader) int64 +} + +func (c *timestampMillisCodec) Decode(ptr unsafe.Pointer, r *Reader) { + var i int64 + if c.convert != nil { + i = c.convert(r) + } else { + i = r.ReadLong() + } + sec := i / 1e3 + nsec := (i - sec*1e3) * 1e6 + t := time.Unix(sec, nsec) + + if c.local { + // When doing unix time, Go will convert the time from UTC to Local, + // changing the time by the number of seconds in the zone offset. 
+ // Remove those added seconds. + _, offset := t.Zone() + t = t.Add(time.Duration(-1*offset) * time.Second) + *((*time.Time)(ptr)) = t + return + } + *((*time.Time)(ptr)) = t.UTC() +} + +func (c *timestampMillisCodec) Encode(ptr unsafe.Pointer, w *Writer) { + t := *((*time.Time)(ptr)) + if c.local { + t = t.Local() + t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC) + } + w.WriteLong(t.Unix()*1e3 + int64(t.Nanosecond()/1e6)) +} + +type timestampMicrosCodec struct { + local bool + convert func(*Reader) int64 +} + +func (c *timestampMicrosCodec) Decode(ptr unsafe.Pointer, r *Reader) { + var i int64 + if c.convert != nil { + i = c.convert(r) + } else { + i = r.ReadLong() + } + sec := i / 1e6 + nsec := (i - sec*1e6) * 1e3 + t := time.Unix(sec, nsec) + + if c.local { + // When doing unix time, Go will convert the time from UTC to Local, + // changing the time by the number of seconds in the zone offset. + // Remove those added seconds. + _, offset := t.Zone() + t = t.Add(time.Duration(-1*offset) * time.Second) + *((*time.Time)(ptr)) = t + return + } + *((*time.Time)(ptr)) = t.UTC() +} + +func (c *timestampMicrosCodec) Encode(ptr unsafe.Pointer, w *Writer) { + t := *((*time.Time)(ptr)) + if c.local { + t = t.Local() + t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), time.UTC) + } + w.WriteLong(t.Unix()*1e6 + int64(t.Nanosecond()/1e3)) +} + +type timeMillisCodec struct{} + +func (c *timeMillisCodec) Decode(ptr unsafe.Pointer, r *Reader) { + i := r.ReadInt() + *((*time.Duration)(ptr)) = time.Duration(i) * time.Millisecond +} + +func (c *timeMillisCodec) Encode(ptr unsafe.Pointer, w *Writer) { + d := *((*time.Duration)(ptr)) + w.WriteInt(int32(d.Nanoseconds() / int64(time.Millisecond))) +} + +type timeMicrosCodec struct { + convert func(*Reader) int64 +} + +func (c *timeMicrosCodec) Decode(ptr unsafe.Pointer, r *Reader) { + var i int64 + if c.convert != nil { + i = c.convert(r) + } else { + i = r.ReadLong() + } + *((*time.Duration)(ptr)) = time.Duration(i) * time.Microsecond +} + +func (c *timeMicrosCodec) Encode(ptr unsafe.Pointer, w *Writer) { + d := *((*time.Duration)(ptr)) + w.WriteLong(d.Nanoseconds() / int64(time.Microsecond)) +} + +var one = big.NewInt(1) + +type bytesDecimalCodec struct { + prec int + scale int +} + +func (c *bytesDecimalCodec) Decode(ptr unsafe.Pointer, r *Reader) { + b := r.ReadBytes() + if i := (&big.Int{}).SetBytes(b); len(b) > 0 && b[0]&0x80 > 0 { + i.Sub(i, new(big.Int).Lsh(one, uint(len(b))*8)) + } + *((**big.Rat)(ptr)) = ratFromBytes(b, c.scale) +} + +func ratFromBytes(b []byte, scale int) *big.Rat { + num := (&big.Int{}).SetBytes(b) + if len(b) > 0 && b[0]&0x80 > 0 { + num.Sub(num, new(big.Int).Lsh(one, uint(len(b))*8)) + } + denom := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(scale)), nil) + return new(big.Rat).SetFrac(num, denom) +} + +func (c *bytesDecimalCodec) Encode(ptr unsafe.Pointer, w *Writer) { + r := (*big.Rat)(ptr) + scale := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(c.scale)), nil) + i := (&big.Int{}).Mul(r.Num(), scale) + i = i.Div(i, r.Denom()) + + var b []byte + switch i.Sign() { + case 0: + b = []byte{0} + + case 1: + b = i.Bytes() + if b[0]&0x80 > 0 { + b = append([]byte{0}, b...) 
+ } + + case -1: + length := uint(i.BitLen()/8+1) * 8 + b = i.Add(i, (&big.Int{}).Lsh(one, length)).Bytes() + } + w.WriteBytes(b) +} + +type bytesDecimalPtrCodec struct { + prec int + scale int +} + +func (c *bytesDecimalPtrCodec) Decode(ptr unsafe.Pointer, r *Reader) { + b := r.ReadBytes() + if i := (&big.Int{}).SetBytes(b); len(b) > 0 && b[0]&0x80 > 0 { + i.Sub(i, new(big.Int).Lsh(one, uint(len(b))*8)) + } + *((**big.Rat)(ptr)) = ratFromBytes(b, c.scale) +} + +func (c *bytesDecimalPtrCodec) Encode(ptr unsafe.Pointer, w *Writer) { + r := *((**big.Rat)(ptr)) + scale := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(c.scale)), nil) + i := (&big.Int{}).Mul(r.Num(), scale) + i = i.Div(i, r.Denom()) + + var b []byte + switch i.Sign() { + case 0: + b = []byte{0} + + case 1: + b = i.Bytes() + if b[0]&0x80 > 0 { + b = append([]byte{0}, b...) + } + + case -1: + length := uint(i.BitLen()/8+1) * 8 + b = i.Add(i, (&big.Int{}).Lsh(one, length)).Bytes() + } + w.WriteBytes(b) +} diff --git a/vendor/github.com/hamba/avro/v2/codec_ptr.go b/vendor/github.com/hamba/avro/v2/codec_ptr.go new file mode 100644 index 00000000..07b099ee --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_ptr.go @@ -0,0 +1,66 @@ +package avro + +import ( + "errors" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfPtr(d *decoderContext, schema Schema, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + + decoder := decoderOfType(d, schema, elemType) + + return &dereferenceDecoder{typ: elemType, decoder: decoder} +} + +type dereferenceDecoder struct { + typ reflect2.Type + decoder ValDecoder +} + +func (d *dereferenceDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + if *((*unsafe.Pointer)(ptr)) == nil { + // Create new instance + newPtr := d.typ.UnsafeNew() + d.decoder.Decode(newPtr, r) + *((*unsafe.Pointer)(ptr)) = newPtr + return + } + + // Reuse existing instance + d.decoder.Decode(*((*unsafe.Pointer)(ptr)), r) +} + +func encoderOfPtr(e *encoderContext, schema Schema, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + + enc := encoderOfType(e, schema, elemType) + + return &dereferenceEncoder{typ: elemType, encoder: enc} +} + +type dereferenceEncoder struct { + typ reflect2.Type + encoder ValEncoder +} + +func (d *dereferenceEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + if *((*unsafe.Pointer)(ptr)) == nil { + w.Error = errors.New("avro: cannot encode nil pointer") + return + } + + d.encoder.Encode(*((*unsafe.Pointer)(ptr)), w) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), r) +} diff --git a/vendor/github.com/hamba/avro/v2/codec_record.go b/vendor/github.com/hamba/avro/v2/codec_record.go new file mode 100644 index 00000000..7cfdbef3 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_record.go @@ -0,0 +1,503 @@ +package avro + +import ( + "errors" + "fmt" + "io" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func createDecoderOfRecord(d *decoderContext, schema Schema, typ reflect2.Type) ValDecoder { + switch typ.Kind() { + case reflect.Struct: + return decoderOfStruct(d, schema, typ) + + case reflect.Map: + if typ.(reflect2.MapType).Key().Kind() != reflect.String || + typ.(reflect2.MapType).Elem().Kind() != reflect.Interface { + break + } + return decoderOfRecord(d, schema, typ) + + case reflect.Ptr: + return decoderOfPtr(d, schema, 
typ) + + case reflect.Interface: + if ifaceType, ok := typ.(*reflect2.UnsafeIFaceType); ok { + return &recordIfaceDecoder{schema: schema, valType: ifaceType} + } + } + + return &errorDecoder{err: fmt.Errorf("avro: %s is unsupported for avro %s", typ.String(), schema.Type())} +} + +func createEncoderOfRecord(e *encoderContext, schema *RecordSchema, typ reflect2.Type) ValEncoder { + switch typ.Kind() { + case reflect.Struct: + return encoderOfStruct(e, schema, typ) + + case reflect.Map: + if typ.(reflect2.MapType).Key().Kind() != reflect.String || + typ.(reflect2.MapType).Elem().Kind() != reflect.Interface { + break + } + return encoderOfRecord(e, schema, typ) + + case reflect.Ptr: + return encoderOfPtr(e, schema, typ) + } + + return &errorEncoder{err: fmt.Errorf("avro: %s is unsupported for avro %s", typ.String(), schema.Type())} +} + +func decoderOfStruct(d *decoderContext, schema Schema, typ reflect2.Type) ValDecoder { + rec := schema.(*RecordSchema) + structDesc := describeStruct(d.cfg.getTagKey(), typ) + + fields := make([]*structFieldDecoder, 0, len(rec.Fields())) + + for _, field := range rec.Fields() { + if field.action == FieldIgnore { + fields = append(fields, &structFieldDecoder{ + decoder: createSkipDecoder(field.Type()), + }) + continue + } + + sf := structDesc.Fields.Get(field.Name()) + if sf == nil { + for _, alias := range field.Aliases() { + sf = structDesc.Fields.Get(alias) + if sf != nil { + break + } + } + } + + // Skip field if it doesnt exist + if sf == nil { + fields = append(fields, &structFieldDecoder{ + decoder: createSkipDecoder(field.Type()), + }) + continue + } + + if field.action == FieldSetDefault { + if field.hasDef { + fields = append(fields, &structFieldDecoder{ + field: sf.Field, + decoder: createDefaultDecoder(d, field, sf.Field[len(sf.Field)-1].Type()), + }) + + continue + } + } + + dec := decoderOfType(d, field.Type(), sf.Field[len(sf.Field)-1].Type()) + fields = append(fields, &structFieldDecoder{ + field: sf.Field, + decoder: dec, + }) + } + + return &structDecoder{typ: typ, fields: fields} +} + +type structFieldDecoder struct { + field []*reflect2.UnsafeStructField + decoder ValDecoder +} + +type structDecoder struct { + typ reflect2.Type + fields []*structFieldDecoder +} + +func (d *structDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + for _, field := range d.fields { + // Skip case + if field.field == nil { + field.decoder.Decode(nil, r) + continue + } + + fieldPtr := ptr + for i, f := range field.field { + fieldPtr = f.UnsafeGet(fieldPtr) + + if i == len(field.field)-1 { + break + } + + if f.Type().Kind() == reflect.Ptr { + if *((*unsafe.Pointer)(fieldPtr)) == nil { + newPtr := f.Type().(*reflect2.UnsafePtrType).Elem().UnsafeNew() + *((*unsafe.Pointer)(fieldPtr)) = newPtr + } + + fieldPtr = *((*unsafe.Pointer)(fieldPtr)) + } + } + field.decoder.Decode(fieldPtr, r) + + if r.Error != nil && !errors.Is(r.Error, io.EOF) { + for _, f := range field.field { + r.Error = fmt.Errorf("%s: %w", f.Name(), r.Error) + return + } + } + } +} + +func encoderOfStruct(e *encoderContext, rec *RecordSchema, typ reflect2.Type) ValEncoder { + structDesc := describeStruct(e.cfg.getTagKey(), typ) + + fields := make([]*structFieldEncoder, 0, len(rec.Fields())) + for _, field := range rec.Fields() { + sf := structDesc.Fields.Get(field.Name()) + if sf != nil { + fields = append(fields, &structFieldEncoder{ + field: sf.Field, + encoder: encoderOfType(e, field.Type(), sf.Field[len(sf.Field)-1].Type()), + }) + continue + } + + if !field.HasDefault() { + // In all other cases, 
this is a required field + err := fmt.Errorf("avro: record %s is missing required field %q", rec.FullName(), field.Name()) + return &errorEncoder{err: err} + } + + def := field.Default() + if field.Default() == nil { + if field.Type().Type() == Null { + // We write nothing in a Null case, just skip it + continue + } + + if field.Type().Type() == Union && field.Type().(*UnionSchema).Nullable() { + defaultType := reflect2.TypeOf(&def) + fields = append(fields, &structFieldEncoder{ + defaultPtr: reflect2.PtrOf(&def), + encoder: encoderOfNullableUnion(e, field.Type(), defaultType), + }) + continue + } + } + + defaultType := reflect2.TypeOf(def) + defaultEncoder := encoderOfType(e, field.Type(), defaultType) + if defaultType.LikePtr() { + defaultEncoder = &onePtrEncoder{defaultEncoder} + } + fields = append(fields, &structFieldEncoder{ + defaultPtr: reflect2.PtrOf(def), + encoder: defaultEncoder, + }) + } + return &structEncoder{typ: typ, fields: fields} +} + +type structFieldEncoder struct { + field []*reflect2.UnsafeStructField + defaultPtr unsafe.Pointer + encoder ValEncoder +} + +type structEncoder struct { + typ reflect2.Type + fields []*structFieldEncoder +} + +func (e *structEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + for _, field := range e.fields { + // Default case + if field.field == nil { + field.encoder.Encode(field.defaultPtr, w) + continue + } + + fieldPtr := ptr + for i, f := range field.field { + fieldPtr = f.UnsafeGet(fieldPtr) + + if i == len(field.field)-1 { + break + } + + if f.Type().Kind() == reflect.Ptr { + if *((*unsafe.Pointer)(fieldPtr)) == nil { + w.Error = fmt.Errorf("embedded field %q is nil", f.Name()) + return + } + + fieldPtr = *((*unsafe.Pointer)(fieldPtr)) + } + } + field.encoder.Encode(fieldPtr, w) + + if w.Error != nil && !errors.Is(w.Error, io.EOF) { + for _, f := range field.field { + w.Error = fmt.Errorf("%s: %w", f.Name(), w.Error) + return + } + } + } +} + +func decoderOfRecord(d *decoderContext, schema Schema, typ reflect2.Type) ValDecoder { + rec := schema.(*RecordSchema) + mapType := typ.(*reflect2.UnsafeMapType) + + fields := make([]recordMapDecoderField, len(rec.Fields())) + for i, field := range rec.Fields() { + switch field.action { + case FieldIgnore: + fields[i] = recordMapDecoderField{ + name: field.Name(), + decoder: createSkipDecoder(field.Type()), + skip: true, + } + continue + case FieldSetDefault: + if field.hasDef { + fields[i] = recordMapDecoderField{ + name: field.Name(), + decoder: createDefaultDecoder(d, field, mapType.Elem()), + } + continue + } + } + + fields[i] = recordMapDecoderField{ + name: field.Name(), + decoder: newEfaceDecoder(d, field.Type()), + } + } + + return &recordMapDecoder{ + mapType: mapType, + elemType: mapType.Elem(), + fields: fields, + } +} + +type recordMapDecoderField struct { + name string + decoder ValDecoder + skip bool +} + +type recordMapDecoder struct { + mapType *reflect2.UnsafeMapType + elemType reflect2.Type + fields []recordMapDecoderField +} + +func (d *recordMapDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + if d.mapType.UnsafeIsNil(ptr) { + d.mapType.UnsafeSet(ptr, d.mapType.UnsafeMakeMap(len(d.fields))) + } + + for _, field := range d.fields { + elemPtr := d.elemType.UnsafeNew() + field.decoder.Decode(elemPtr, r) + if field.skip { + continue + } + + d.mapType.UnsafeSetIndex(ptr, reflect2.PtrOf(field), elemPtr) + } + + if r.Error != nil && !errors.Is(r.Error, io.EOF) { + r.Error = fmt.Errorf("%v: %w", d.mapType, r.Error) + } +} + +func encoderOfRecord(e *encoderContext, rec 
*RecordSchema, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + + fields := make([]mapEncoderField, len(rec.Fields())) + for i, field := range rec.Fields() { + fields[i] = mapEncoderField{ + name: field.Name(), + hasDef: field.HasDefault(), + def: field.Default(), + encoder: encoderOfType(e, field.Type(), mapType.Elem()), + } + + if field.HasDefault() { + switch { + case field.Type().Type() == Union: + union := field.Type().(*UnionSchema) + fields[i].def = map[string]any{ + string(union.Types()[0].Type()): field.Default(), + } + case field.Default() == nil: + continue + } + + defaultType := reflect2.TypeOf(fields[i].def) + fields[i].defEncoder = encoderOfType(e, field.Type(), defaultType) + if defaultType.LikePtr() { + fields[i].defEncoder = &onePtrEncoder{fields[i].defEncoder} + } + } + } + + return &recordMapEncoder{ + mapType: mapType, + fields: fields, + } +} + +type mapEncoderField struct { + name string + hasDef bool + def any + defEncoder ValEncoder + encoder ValEncoder +} + +type recordMapEncoder struct { + mapType *reflect2.UnsafeMapType + fields []mapEncoderField +} + +func (e *recordMapEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + for _, field := range e.fields { + // The first property of mapEncoderField is the name, so a pointer + // to field is a pointer to the name. + valPtr := e.mapType.UnsafeGetIndex(ptr, reflect2.PtrOf(field)) + if valPtr == nil { + // Missing required field + if !field.hasDef { + w.Error = fmt.Errorf("avro: missing required field %s", field.name) + return + } + + // Null default + if field.def == nil { + continue + } + + defPtr := reflect2.PtrOf(field.def) + field.defEncoder.Encode(defPtr, w) + continue + } + + field.encoder.Encode(valPtr, w) + + if w.Error != nil && !errors.Is(w.Error, io.EOF) { + w.Error = fmt.Errorf("%s: %w", field.name, w.Error) + return + } + } +} + +type recordIfaceDecoder struct { + schema Schema + valType *reflect2.UnsafeIFaceType +} + +func (d *recordIfaceDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + obj := d.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + r.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + + r.ReadVal(d.schema, obj) +} + +type structDescriptor struct { + Type reflect2.Type + Fields structFields +} + +type structFields []*structField + +func (sf structFields) Get(name string) *structField { + for _, f := range sf { + if f.Name == name { + return f + } + } + + return nil +} + +type structField struct { + Name string + Field []*reflect2.UnsafeStructField + + anon *reflect2.UnsafeStructType +} + +func describeStruct(tagKey string, typ reflect2.Type) *structDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + fields := structFields{} + + var curr []structField + next := []structField{{anon: structType}} + + visited := map[uintptr]bool{} + + for len(next) > 0 { + curr, next = next, curr[:0] + + for _, f := range curr { + rtype := f.anon.RType() + if visited[f.anon.RType()] { + continue + } + visited[rtype] = true + + for i := 0; i < f.anon.NumField(); i++ { + field := f.anon.Field(i).(*reflect2.UnsafeStructField) + isUnexported := field.PkgPath() != "" + + chain := make([]*reflect2.UnsafeStructField, len(f.Field)+1) + copy(chain, f.Field) + chain[len(f.Field)] = field + + if field.Anonymous() { + t := field.Type() + if t.Kind() == reflect.Ptr { + t = t.(*reflect2.UnsafePtrType).Elem() + } + if t.Kind() != reflect.Struct { + continue + } + + next = append(next, structField{Field: chain, anon: t.(*reflect2.UnsafeStructType)}) 
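+				// Editor's note: embedded (anonymous) structs are queued here
+				// and walked breadth-first on the next pass, so promoted
+				// fields from shallower embeddings are found first and win,
+				// mirroring Go's own field-promotion rules.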
+ continue + } + + // Ignore unexported fields. + if isUnexported { + continue + } + + fieldName := field.Name() + if tag, ok := field.Tag().Lookup(tagKey); ok { + fieldName = tag + } + + fields = append(fields, &structField{ + Name: fieldName, + Field: chain, + }) + } + } + } + + return &structDescriptor{ + Type: structType, + Fields: fields, + } +} diff --git a/vendor/github.com/hamba/avro/v2/codec_skip.go b/vendor/github.com/hamba/avro/v2/codec_skip.go new file mode 100644 index 00000000..965efe8a --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_skip.go @@ -0,0 +1,225 @@ +package avro + +import ( + "fmt" + "unsafe" +) + +func createSkipDecoder(schema Schema) ValDecoder { + switch schema.Type() { + case Boolean: + return &boolSkipDecoder{} + + case Int: + return &intSkipDecoder{} + + case Long: + return &longSkipDecoder{} + + case Float: + return &floatSkipDecoder{} + + case Double: + return &doubleSkipDecoder{} + + case String: + return &stringSkipDecoder{} + + case Bytes: + return &bytesSkipDecoder{} + + case Record: + return skipDecoderOfRecord(schema) + + case Ref: + return createSkipDecoder(schema.(*RefSchema).Schema()) + + case Enum: + return &enumSkipDecoder{symbols: schema.(*EnumSchema).Symbols()} + + case Array: + return skipDecoderOfArray(schema) + + case Map: + return skipDecoderOfMap(schema) + + case Union: + return skipDecoderOfUnion(schema) + + case Fixed: + return &fixedSkipDecoder{size: schema.(*FixedSchema).Size()} + + default: + return &errorDecoder{err: fmt.Errorf("avro: schema type %s is unsupported", schema.Type())} + } +} + +type boolSkipDecoder struct{} + +func (*boolSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + r.SkipBool() +} + +type intSkipDecoder struct{} + +func (*intSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + r.SkipInt() +} + +type longSkipDecoder struct{} + +func (*longSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + r.SkipLong() +} + +type floatSkipDecoder struct{} + +func (*floatSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + r.SkipFloat() +} + +type doubleSkipDecoder struct{} + +func (*doubleSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + r.SkipDouble() +} + +type stringSkipDecoder struct{} + +func (*stringSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + r.SkipString() +} + +type bytesSkipDecoder struct{} + +func (c *bytesSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + r.SkipBytes() +} + +func skipDecoderOfRecord(schema Schema) ValDecoder { + rec := schema.(*RecordSchema) + + decoders := make([]ValDecoder, len(rec.Fields())) + for i, field := range rec.Fields() { + decoders[i] = createSkipDecoder(field.Type()) + } + + return &recordSkipDecoder{ + decoders: decoders, + } +} + +type recordSkipDecoder struct { + decoders []ValDecoder +} + +func (d *recordSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + for _, decoder := range d.decoders { + decoder.Decode(nil, r) + } +} + +type enumSkipDecoder struct { + symbols []string +} + +func (c *enumSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + r.SkipInt() +} + +func skipDecoderOfArray(schema Schema) ValDecoder { + arr := schema.(*ArraySchema) + decoder := createSkipDecoder(arr.Items()) + + return &sliceSkipDecoder{ + decoder: decoder, + } +} + +type sliceSkipDecoder struct { + decoder ValDecoder +} + +func (d *sliceSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + for { + l, size := r.ReadBlockHeader() + if l == 0 { + break + } + + if size > 0 { + r.SkipNBytes(int(size)) + continue + } + + for i := 0; i < int(l); i++ { + d.decoder.Decode(nil, r) + } + } +} 
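+// Editor's note: an illustrative sketch of the skip pattern shared by
+// sliceSkipDecoder and mapSkipDecoder above (`itemDecoder` is a placeholder
+// for any ValDecoder; this sketch is not part of the vendored library).
+// Avro encodes arrays and maps as a series of blocks: each block starts with
+// an item count, a count of zero ends the series, and a negative count on
+// the wire is followed by the block's size in bytes, which ReadBlockHeader
+// surfaces as a non-zero size. A known byte size lets the decoder discard a
+// whole block at once instead of walking it item by item:
+//
+//	for {
+//		count, size := r.ReadBlockHeader()
+//		if count == 0 {
+//			break // end of the block series
+//		}
+//		if size > 0 {
+//			r.SkipNBytes(int(size)) // fast path: drop the raw bytes
+//			continue
+//		}
+//		for i := 0; i < int(count); i++ {
+//			itemDecoder.Decode(nil, r) // slow path: skip item by item
+//		}
+//	}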
+ +func skipDecoderOfMap(schema Schema) ValDecoder { + m := schema.(*MapSchema) + decoder := createSkipDecoder(m.Values()) + + return &mapSkipDecoder{ + decoder: decoder, + } +} + +type mapSkipDecoder struct { + decoder ValDecoder +} + +func (d *mapSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + for { + l, size := r.ReadBlockHeader() + if l == 0 { + break + } + + if size > 0 { + r.SkipNBytes(int(size)) + continue + } + + for i := 0; i < int(l); i++ { + r.SkipString() + d.decoder.Decode(nil, r) + } + } +} + +func skipDecoderOfUnion(schema Schema) ValDecoder { + union := schema.(*UnionSchema) + + return &unionSkipDecoder{ + schema: union, + } +} + +type unionSkipDecoder struct { + schema *UnionSchema +} + +func (d *unionSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + _, resSchema := getUnionSchema(d.schema, r) + if resSchema == nil { + return + } + + // In a null case, just return + if resSchema.Type() == Null { + return + } + + createSkipDecoder(resSchema).Decode(nil, r) +} + +type fixedSkipDecoder struct { + size int +} + +func (d *fixedSkipDecoder) Decode(_ unsafe.Pointer, r *Reader) { + r.SkipNBytes(d.size) +} diff --git a/vendor/github.com/hamba/avro/v2/codec_union.go b/vendor/github.com/hamba/avro/v2/codec_union.go new file mode 100644 index 00000000..7d80b539 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/codec_union.go @@ -0,0 +1,460 @@ +package avro + +import ( + "errors" + "fmt" + "reflect" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func createDecoderOfUnion(d *decoderContext, schema *UnionSchema, typ reflect2.Type) ValDecoder { + switch typ.Kind() { + case reflect.Map: + if typ.(reflect2.MapType).Key().Kind() != reflect.String || + typ.(reflect2.MapType).Elem().Kind() != reflect.Interface { + break + } + return decoderOfMapUnion(d, schema, typ) + case reflect.Slice: + if !schema.Nullable() { + break + } + return decoderOfNullableUnion(d, schema, typ) + case reflect.Ptr: + if !schema.Nullable() { + break + } + return decoderOfNullableUnion(d, schema, typ) + case reflect.Interface: + if _, ok := typ.(*reflect2.UnsafeIFaceType); !ok { + dec, err := decoderOfResolvedUnion(d, schema) + if err != nil { + return &errorDecoder{err: fmt.Errorf("avro: problem resolving decoder for Avro %s: %w", schema.Type(), err)} + } + return dec + } + } + + return &errorDecoder{err: fmt.Errorf("avro: %s is unsupported for Avro %s", typ.String(), schema.Type())} +} + +func createEncoderOfUnion(e *encoderContext, schema *UnionSchema, typ reflect2.Type) ValEncoder { + switch typ.Kind() { + case reflect.Map: + if typ.(reflect2.MapType).Key().Kind() != reflect.String || + typ.(reflect2.MapType).Elem().Kind() != reflect.Interface { + break + } + return encoderOfMapUnion(e, schema, typ) + case reflect.Slice: + if !schema.Nullable() { + break + } + return encoderOfNullableUnion(e, schema, typ) + case reflect.Ptr: + if !schema.Nullable() { + break + } + return encoderOfNullableUnion(e, schema, typ) + } + return encoderOfResolverUnion(e, schema, typ) +} + +func decoderOfMapUnion(d *decoderContext, union *UnionSchema, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + + typeDecs := make([]ValDecoder, len(union.Types())) + for i, s := range union.Types() { + if s.Type() == Null { + continue + } + typeDecs[i] = newEfaceDecoder(d, s) + } + + return &mapUnionDecoder{ + cfg: d.cfg, + schema: union, + mapType: mapType, + elemType: mapType.Elem(), + typeDecs: typeDecs, + } +} + +type mapUnionDecoder struct { + cfg *frozenConfig + schema *UnionSchema + mapType 
*reflect2.UnsafeMapType + elemType reflect2.Type + typeDecs []ValDecoder +} + +func (d *mapUnionDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + idx, resSchema := getUnionSchema(d.schema, r) + if resSchema == nil { + return + } + + // In a null case, just return + if resSchema.Type() == Null { + return + } + + if d.mapType.UnsafeIsNil(ptr) { + d.mapType.UnsafeSet(ptr, d.mapType.UnsafeMakeMap(1)) + } + + key := schemaTypeName(resSchema) + keyPtr := reflect2.PtrOf(key) + + elemPtr := d.elemType.UnsafeNew() + d.typeDecs[idx].Decode(elemPtr, r) + + d.mapType.UnsafeSetIndex(ptr, keyPtr, elemPtr) +} + +func encoderOfMapUnion(e *encoderContext, union *UnionSchema, _ reflect2.Type) ValEncoder { + return &mapUnionEncoder{ + cfg: e.cfg, + schema: union, + } +} + +type mapUnionEncoder struct { + cfg *frozenConfig + schema *UnionSchema +} + +func (e *mapUnionEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + m := *((*map[string]any)(ptr)) + + if len(m) > 1 { + w.Error = errors.New("avro: cannot encode union map with multiple entries") + return + } + + name := "null" + val := any(nil) + for k, v := range m { + name = k + val = v + break + } + + schema, pos := e.schema.Types().Get(name) + if schema == nil { + w.Error = fmt.Errorf("avro: unknown union type %s", name) + return + } + + w.WriteInt(int32(pos)) + + if schema.Type() == Null && val == nil { + return + } + + elemType := reflect2.TypeOf(val) + elemPtr := reflect2.PtrOf(val) + + encoder := encoderOfType(newEncoderContext(e.cfg), schema, elemType) + if elemType.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + encoder.Encode(elemPtr, w) +} + +func decoderOfNullableUnion(d *decoderContext, schema Schema, typ reflect2.Type) ValDecoder { + union := schema.(*UnionSchema) + _, typeIdx := union.Indices() + + var ( + baseTyp reflect2.Type + isPtr bool + ) + switch v := typ.(type) { + case *reflect2.UnsafePtrType: + baseTyp = v.Elem() + isPtr = true + case *reflect2.UnsafeSliceType: + baseTyp = v + } + decoder := decoderOfType(d, union.Types()[typeIdx], baseTyp) + + return &unionNullableDecoder{ + schema: union, + typ: baseTyp, + isPtr: isPtr, + decoder: decoder, + } +} + +type unionNullableDecoder struct { + schema *UnionSchema + typ reflect2.Type + isPtr bool + decoder ValDecoder +} + +func (d *unionNullableDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + _, schema := getUnionSchema(d.schema, r) + if schema == nil { + return + } + + if schema.Type() == Null { + *((*unsafe.Pointer)(ptr)) = nil + return + } + + // Handle the non-ptr case separately. + if !d.isPtr { + if d.typ.UnsafeIsNil(ptr) { + // Create a new instance. + newPtr := d.typ.UnsafeNew() + d.decoder.Decode(newPtr, r) + d.typ.UnsafeSet(ptr, newPtr) + return + } + + // Reuse the existing instance. + d.decoder.Decode(ptr, r) + return + } + + if *((*unsafe.Pointer)(ptr)) == nil { + // Create new instance. + newPtr := d.typ.UnsafeNew() + d.decoder.Decode(newPtr, r) + *((*unsafe.Pointer)(ptr)) = newPtr + return + } + + // Reuse existing instance. 
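+	// (Editor's note: decoding through the existing allocation avoids
+	// creating a new heap object per record when the pointer is already
+	// non-nil.)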
+ d.decoder.Decode(*((*unsafe.Pointer)(ptr)), r) +} + +func encoderOfNullableUnion(e *encoderContext, schema Schema, typ reflect2.Type) ValEncoder { + union := schema.(*UnionSchema) + nullIdx, typeIdx := union.Indices() + + var ( + baseTyp reflect2.Type + isPtr bool + ) + switch v := typ.(type) { + case *reflect2.UnsafePtrType: + baseTyp = v.Elem() + isPtr = true + case *reflect2.UnsafeSliceType: + baseTyp = v + } + encoder := encoderOfType(e, union.Types()[typeIdx], baseTyp) + + return &unionNullableEncoder{ + schema: union, + encoder: encoder, + isPtr: isPtr, + nullIdx: int32(nullIdx), + typeIdx: int32(typeIdx), + } +} + +type unionNullableEncoder struct { + schema *UnionSchema + encoder ValEncoder + isPtr bool + nullIdx int32 + typeIdx int32 +} + +func (e *unionNullableEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + if *((*unsafe.Pointer)(ptr)) == nil { + w.WriteInt(e.nullIdx) + return + } + + w.WriteInt(e.typeIdx) + newPtr := ptr + if e.isPtr { + newPtr = *((*unsafe.Pointer)(ptr)) + } + e.encoder.Encode(newPtr, w) +} + +func decoderOfResolvedUnion(d *decoderContext, schema Schema) (ValDecoder, error) { + union := schema.(*UnionSchema) + + types := make([]reflect2.Type, len(union.Types())) + decoders := make([]ValDecoder, len(union.Types())) + for i, schema := range union.Types() { + name := unionResolutionName(schema) + + typ, err := d.cfg.resolver.Type(name) + if err != nil { + if d.cfg.config.UnionResolutionError { + return nil, err + } + + if d.cfg.config.PartialUnionTypeResolution { + decoders[i] = nil + types[i] = nil + continue + } + + decoders = []ValDecoder{} + types = []reflect2.Type{} + break + } + + decoder := decoderOfType(d, schema, typ) + decoders[i] = decoder + types[i] = typ + } + + return &unionResolvedDecoder{ + cfg: d.cfg, + schema: union, + types: types, + decoders: decoders, + }, nil +} + +type unionResolvedDecoder struct { + cfg *frozenConfig + schema *UnionSchema + types []reflect2.Type + decoders []ValDecoder +} + +func (d *unionResolvedDecoder) Decode(ptr unsafe.Pointer, r *Reader) { + i, schema := getUnionSchema(d.schema, r) + if schema == nil { + return + } + + pObj := (*any)(ptr) + + if schema.Type() == Null { + *pObj = nil + return + } + + if i >= len(d.decoders) || d.decoders[i] == nil { + if d.cfg.config.UnionResolutionError { + r.ReportError("decode union type", "unknown union type") + return + } + + // We cannot resolve this, set it to the map type + name := schemaTypeName(schema) + obj := map[string]any{} + vTyp, err := genericReceiver(schema) + if err != nil { + r.ReportError("Union", err.Error()) + return + } + obj[name] = genericDecode(vTyp, decoderOfType(newDecoderContext(d.cfg), schema, vTyp), r) + + *pObj = obj + return + } + + typ := d.types[i] + var newPtr unsafe.Pointer + switch typ.Kind() { + case reflect.Map: + mapType := typ.(*reflect2.UnsafeMapType) + newPtr = mapType.UnsafeMakeMap(1) + + case reflect.Slice: + mapType := typ.(*reflect2.UnsafeSliceType) + newPtr = mapType.UnsafeMakeSlice(1, 1) + + case reflect.Ptr: + elemType := typ.(*reflect2.UnsafePtrType).Elem() + newPtr = elemType.UnsafeNew() + + default: + newPtr = typ.UnsafeNew() + } + + d.decoders[i].Decode(newPtr, r) + *pObj = typ.UnsafeIndirect(newPtr) +} + +func unionResolutionName(schema Schema) string { + name := schemaTypeName(schema) + switch schema.Type() { + case Map: + name += ":" + valSchema := schema.(*MapSchema).Values() + valName := schemaTypeName(valSchema) + + name += valName + + case Array: + name += ":" + itemSchema := schema.(*ArraySchema).Items() + itemName := 
schemaTypeName(itemSchema) + + name += itemName + } + + return name +} + +func encoderOfResolverUnion(e *encoderContext, schema Schema, typ reflect2.Type) ValEncoder { + union := schema.(*UnionSchema) + + names, err := e.cfg.resolver.Name(typ) + if err != nil { + return &errorEncoder{err: err} + } + + var pos int + for _, name := range names { + if idx := strings.Index(name, ":"); idx > 0 { + name = name[:idx] + } + + schema, pos = union.Types().Get(name) + if schema != nil { + break + } + } + if schema == nil { + return &errorEncoder{err: fmt.Errorf("avro: unknown union type %s", names[0])} + } + + encoder := encoderOfType(e, schema, typ) + + return &unionResolverEncoder{ + pos: pos, + encoder: encoder, + } +} + +type unionResolverEncoder struct { + pos int + encoder ValEncoder +} + +func (e *unionResolverEncoder) Encode(ptr unsafe.Pointer, w *Writer) { + w.WriteInt(int32(e.pos)) + + e.encoder.Encode(ptr, w) +} + +func getUnionSchema(schema *UnionSchema, r *Reader) (int, Schema) { + types := schema.Types() + + idx := int(r.ReadInt()) + if idx < 0 || idx > len(types)-1 { + r.ReportError("decode union type", "unknown union type") + return 0, nil + } + + return idx, types[idx] +} diff --git a/vendor/github.com/hamba/avro/v2/config.go b/vendor/github.com/hamba/avro/v2/config.go new file mode 100644 index 00000000..57b2769a --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/config.go @@ -0,0 +1,282 @@ +package avro + +import ( + "errors" + "io" + "sync" + + "github.com/modern-go/reflect2" +) + +const ( + defaultMaxByteSliceSize = 1_048_576 // 1 MiB +) + +// DefaultConfig is the default API. +var DefaultConfig = Config{}.Freeze() + +// Config customises how the codec should behave. +type Config struct { + // TagKey is the struct tag key used when en/decoding structs. + // This defaults to "avro". + TagKey string + + // BlockLength is the length of blocks for maps and arrays. + // This defaults to 100. + BlockLength int + + // DisableBlockSizeHeader disables encoding of an array/map size in bytes. + // Encoded array/map will be prefixed with only the number of elements in + // contrast with default behavior which prefixes them with the number of elements + // and the total number of bytes in the array/map. Both approaches are valid according to the + // Avro specification, however not all decoders support the latter. + DisableBlockSizeHeader bool + + // UnionResolutionError determines if an error will be returned + // when a type cannot be resolved while decoding a union. + UnionResolutionError bool + + // PartialUnionTypeResolution dictates if the union type resolution + // should be attempted even when not all union types are registered. + // When enabled, the underlying type will get resolved if it is registered + // even if other types of the union are not. If resolution fails, logic + // falls back to default union resolution behavior based on the value of + // UnionResolutionError. + PartialUnionTypeResolution bool + + // Disable caching layer for encoders and decoders, forcing them to get rebuilt on every + // call to Marshal() and Unmarshal() + DisableCaching bool + + // MaxByteSliceSize is the maximum size of `bytes` or `string` types the Reader will create, defaulting to 1MiB. + // If this size is exceeded, the Reader returns an error. This can be disabled by setting a negative number. + MaxByteSliceSize int + + // MaxSliceAllocSize is the maximum size that the decoder will allocate, set to the max heap + // allocation size by default. 
+ // If this size is exceeded, the decoder returns an error. + MaxSliceAllocSize int +} + +// Freeze makes the configuration immutable. +func (c Config) Freeze() API { + api := &frozenConfig{ + config: c, + resolver: NewTypeResolver(), + } + + api.readerPool = &sync.Pool{ + New: func() any { + return &Reader{ + cfg: api, + reader: nil, + buf: nil, + head: 0, + tail: 0, + } + }, + } + api.writerPool = &sync.Pool{ + New: func() any { + return &Writer{ + cfg: api, + out: nil, + buf: make([]byte, 0, 512), + Error: nil, + } + }, + } + + return api +} + +// API represents a frozen Config. +type API interface { + // Marshal returns the Avro encoding of v. + Marshal(schema Schema, v any) ([]byte, error) + + // Unmarshal parses the Avro encoded data and stores the result in the value pointed to by v. + // If v is nil or not a pointer, Unmarshal returns an error. + Unmarshal(schema Schema, data []byte, v any) error + + // NewEncoder returns a new encoder that writes to w using schema. + NewEncoder(schema Schema, w io.Writer) *Encoder + + // NewDecoder returns a new decoder that reads from reader r using schema. + NewDecoder(schema Schema, r io.Reader) *Decoder + + // DecoderOf returns the value decoder for a given schema and type. + DecoderOf(schema Schema, typ reflect2.Type) ValDecoder + + // EncoderOf returns the value encoder for a given schema and type. + EncoderOf(schema Schema, tpy reflect2.Type) ValEncoder + + // Register registers names to their types for resolution. All primitive types are pre-registered. + Register(name string, obj any) +} + +type frozenConfig struct { + config Config + + decoderCache sync.Map // map[cacheKey]ValDecoder + encoderCache sync.Map // map[cacheKey]ValEncoder + + readerPool *sync.Pool + writerPool *sync.Pool + + resolver *TypeResolver +} + +func (c *frozenConfig) Marshal(schema Schema, v any) ([]byte, error) { + writer := c.borrowWriter() + defer c.returnWriter(writer) + + writer.WriteVal(schema, v) + if err := writer.Error; err != nil { + return nil, err + } + + result := writer.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + + return copied, nil +} + +func (c *frozenConfig) borrowWriter() *Writer { + writer := c.writerPool.Get().(*Writer) + writer.Reset(nil) + return writer +} + +func (c *frozenConfig) returnWriter(writer *Writer) { + writer.out = nil + writer.Error = nil + + c.writerPool.Put(writer) +} + +func (c *frozenConfig) Unmarshal(schema Schema, data []byte, v any) error { + reader := c.borrowReader(data) + defer c.returnReader(reader) + + reader.ReadVal(schema, v) + err := reader.Error + + if errors.Is(err, io.EOF) { + return nil + } + + return err +} + +func (c *frozenConfig) borrowReader(data []byte) *Reader { + reader := c.readerPool.Get().(*Reader) + reader.Reset(data) + return reader +} + +func (c *frozenConfig) returnReader(reader *Reader) { + reader.Error = nil + c.readerPool.Put(reader) +} + +func (c *frozenConfig) NewEncoder(schema Schema, w io.Writer) *Encoder { + writer, ok := w.(*Writer) + if !ok { + writer = NewWriter(w, 512, WithWriterConfig(c)) + } + return &Encoder{ + s: schema, + w: writer, + } +} + +func (c *frozenConfig) NewDecoder(schema Schema, r io.Reader) *Decoder { + reader := NewReader(r, 512, WithReaderConfig(c)) + return &Decoder{ + s: schema, + r: reader, + } +} + +func (c *frozenConfig) Register(name string, obj any) { + c.resolver.Register(name, obj) +} + +type cacheKey struct { + fingerprint [32]byte + rtype uintptr +} + +func (c *frozenConfig) addDecoderToCache(fingerprint [32]byte, rtype 
uintptr, dec ValDecoder) { + if c.config.DisableCaching { + return + } + key := cacheKey{fingerprint: fingerprint, rtype: rtype} + c.decoderCache.Store(key, dec) +} + +func (c *frozenConfig) getDecoderFromCache(fingerprint [32]byte, rtype uintptr) ValDecoder { + if c.config.DisableCaching { + return nil + } + key := cacheKey{fingerprint: fingerprint, rtype: rtype} + if dec, ok := c.decoderCache.Load(key); ok { + return dec.(ValDecoder) + } + + return nil +} + +func (c *frozenConfig) addEncoderToCache(fingerprint [32]byte, rtype uintptr, enc ValEncoder) { + if c.config.DisableCaching { + return + } + key := cacheKey{fingerprint: fingerprint, rtype: rtype} + c.encoderCache.Store(key, enc) +} + +func (c *frozenConfig) getEncoderFromCache(fingerprint [32]byte, rtype uintptr) ValEncoder { + if c.config.DisableCaching { + return nil + } + key := cacheKey{fingerprint: fingerprint, rtype: rtype} + if enc, ok := c.encoderCache.Load(key); ok { + return enc.(ValEncoder) + } + + return nil +} + +func (c *frozenConfig) getTagKey() string { + tagKey := c.config.TagKey + if tagKey == "" { + return "avro" + } + return tagKey +} + +func (c *frozenConfig) getBlockLength() int { + blockSize := c.config.BlockLength + if blockSize <= 0 { + return 100 + } + return blockSize +} + +func (c *frozenConfig) getMaxByteSliceSize() int { + size := c.config.MaxByteSliceSize + if size == 0 { + return defaultMaxByteSliceSize + } + return size +} + +func (c *frozenConfig) getMaxSliceAllocSize() int { + size := c.config.MaxSliceAllocSize + if size > maxAllocSize || size <= 0 { + return maxAllocSize + } + return size +} diff --git a/vendor/github.com/hamba/avro/v2/config_386.go b/vendor/github.com/hamba/avro/v2/config_386.go new file mode 100644 index 00000000..a168fd7b --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/config_386.go @@ -0,0 +1,8 @@ +package avro + +import "math" + +// Max allocation size for an array due to the limit in number of bits in a heap address: +// https://github.com/golang/go/blob/7f76c00fc5678fa782708ba8fece63750cb89d03/src/runtime/malloc.go#L190 +// 32-bit systems accept the full 32bit address space +const maxAllocSize = math.MaxInt diff --git a/vendor/github.com/hamba/avro/v2/config_x64.go b/vendor/github.com/hamba/avro/v2/config_x64.go new file mode 100644 index 00000000..5ee7fef0 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/config_x64.go @@ -0,0 +1,7 @@ +//go:build !386 + +package avro + +// Max allocation size for an array due to the limit in number of bits in a heap address: +// https://github.com/golang/go/blob/7f76c00fc5678fa782708ba8fece63750cb89d03/src/runtime/malloc.go#L183 +const maxAllocSize = int(1 << 48) diff --git a/vendor/github.com/hamba/avro/v2/converter.go b/vendor/github.com/hamba/avro/v2/converter.go new file mode 100644 index 00000000..cc1f17ca --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/converter.go @@ -0,0 +1,34 @@ +package avro + +func createLongConverter(typ Type) func(*Reader) int64 { + switch typ { + case Int: + return func(r *Reader) int64 { return int64(r.ReadInt()) } + default: + return nil + } +} + +func createFloatConverter(typ Type) func(*Reader) float32 { + switch typ { + case Int: + return func(r *Reader) float32 { return float32(r.ReadInt()) } + case Long: + return func(r *Reader) float32 { return float32(r.ReadLong()) } + default: + return nil + } +} + +func createDoubleConverter(typ Type) func(*Reader) float64 { + switch typ { + case Int: + return func(r *Reader) float64 { return float64(r.ReadInt()) } + case Long: + return func(r *Reader) 
float64 { return float64(r.ReadLong()) } + case Float: + return func(r *Reader) float64 { return float64(r.ReadFloat()) } + default: + return nil + } +} diff --git a/vendor/github.com/hamba/avro/v2/decoder.go b/vendor/github.com/hamba/avro/v2/decoder.go new file mode 100644 index 00000000..4ae46985 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/decoder.go @@ -0,0 +1,49 @@ +package avro + +import ( + "io" +) + +// Decoder reads and decodes Avro values from an input stream. +type Decoder struct { + s Schema + r *Reader +} + +// NewDecoder returns a new decoder that reads from reader r using schema s. +func NewDecoder(s string, r io.Reader) (*Decoder, error) { + sch, err := Parse(s) + if err != nil { + return nil, err + } + + return NewDecoderForSchema(sch, r), nil +} + +// NewDecoderForSchema returns a new decoder that reads from r using schema. +func NewDecoderForSchema(schema Schema, reader io.Reader) *Decoder { + return DefaultConfig.NewDecoder(schema, reader) +} + +// Decode reads the next Avro encoded value from its input and stores it in the value pointed to by v. +func (d *Decoder) Decode(obj any) error { + if d.r.head == d.r.tail && d.r.reader != nil { + if !d.r.loadMore() { + return io.EOF + } + } + + d.r.ReadVal(d.s, obj) + + //nolint:errorlint // Only direct EOF errors should be discarded. + if d.r.Error == io.EOF { + return nil + } + return d.r.Error +} + +// Unmarshal parses the Avro encoded data and stores the result in the value pointed to by v. +// If v is nil or not a pointer, Unmarshal returns an error. +func Unmarshal(schema Schema, data []byte, v any) error { + return DefaultConfig.Unmarshal(schema, data, v) +} diff --git a/vendor/github.com/hamba/avro/v2/doc.go b/vendor/github.com/hamba/avro/v2/doc.go new file mode 100644 index 00000000..54455733 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/doc.go @@ -0,0 +1,4 @@ +// Package avro implements encoding and decoding of Avro as defined by the Avro specification. +// +// See the Avro specification for an understanding of Avro: http://avro.apache.org/docs/current/ +package avro diff --git a/vendor/github.com/hamba/avro/v2/encoder.go b/vendor/github.com/hamba/avro/v2/encoder.go new file mode 100644 index 00000000..faa285e6 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/encoder.go @@ -0,0 +1,37 @@ +package avro + +import ( + "io" +) + +// Encoder writes Avro values to an output stream. +type Encoder struct { + s Schema + w *Writer +} + +// NewEncoder returns a new encoder that writes to w using schema s. +func NewEncoder(s string, w io.Writer) (*Encoder, error) { + sch, err := Parse(s) + if err != nil { + return nil, err + } + return NewEncoderForSchema(sch, w), nil +} + +// NewEncoderForSchema returns a new encoder that writes to w using schema. +func NewEncoderForSchema(schema Schema, w io.Writer) *Encoder { + return DefaultConfig.NewEncoder(schema, w) +} + +// Encode writes the Avro encoding of v to the stream. +func (e *Encoder) Encode(v any) error { + e.w.WriteVal(e.s, v) + _ = e.w.Flush() + return e.w.Error +} + +// Marshal returns the Avro encoding of v. +func Marshal(schema Schema, v any) ([]byte, error) { + return DefaultConfig.Marshal(schema, v) +} diff --git a/vendor/github.com/hamba/avro/v2/noescape.go b/vendor/github.com/hamba/avro/v2/noescape.go new file mode 100644 index 00000000..89078463 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/noescape.go @@ -0,0 +1,21 @@ +package avro + +import ( + "unsafe" +) + +// noescape hides a pointer from escape analysis. 
noescape is
+// the identity function but escape analysis doesn't think the
+// output depends on the input. noescape is inlined and currently
+// compiles down to zero instructions.
+// USE CAREFULLY!
+//
+// This function is taken from Go std lib:
+// https://github.com/golang/go/blob/master/src/runtime/stubs.go#L178
+//
+//nolint:govet,staticcheck
+//go:nosplit
+func noescape(p unsafe.Pointer) unsafe.Pointer {
+	x := uintptr(p)
+	return unsafe.Pointer(x ^ 0)
+}
diff --git a/vendor/github.com/hamba/avro/v2/noescape.s b/vendor/github.com/hamba/avro/v2/noescape.s
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/github.com/hamba/avro/v2/pkg/crc64/crc64.go b/vendor/github.com/hamba/avro/v2/pkg/crc64/crc64.go
new file mode 100644
index 00000000..15060778
--- /dev/null
+++ b/vendor/github.com/hamba/avro/v2/pkg/crc64/crc64.go
@@ -0,0 +1,97 @@
+// Package crc64 implements the Avro CRC-64 checksum.
+// See https://avro.apache.org/docs/current/spec.html#schema_fingerprints for information.
+package crc64
+
+import (
+	"hash"
+)
+
+func init() {
+	buildTable()
+}
+
+// Size is the size of a CRC-64 checksum in bytes.
+const Size = 8
+
+// Empty is the empty checksum.
+const Empty = 0xc15d213aa4d7a795
+
+// Table is a 256-word table representing the polynomial for efficient processing.
+type Table [256]uint64
+
+func makeTable() *Table {
+	t := new(Table)
+	for i := 0; i < 256; i++ {
+		fp := uint64(i)
+		for j := 0; j < 8; j++ {
+			fp = (fp >> 1) ^ (Empty & -(fp & 1))
+		}
+		t[i] = fp
+	}
+	return t
+}
+
+var crc64Table *Table
+
+func buildTable() {
+	crc64Table = makeTable()
+}
+
+type digest struct {
+	crc uint64
+	tab *Table
+}
+
+// New creates a new hash.Hash64 computing the Avro CRC-64 checksum.
+// Its Sum method will lay the value out in big-endian byte order.
+func New() hash.Hash64 {
+	return &digest{
+		crc: Empty,
+		tab: crc64Table,
+	}
+}
+
+// Size returns the size of the checksum in bytes.
+func (d *digest) Size() int {
+	return Size
+}
+
+// BlockSize returns the block size of the checksum.
+func (d *digest) BlockSize() int {
+	return 1
+}
+
+// Reset resets the hash instance.
+func (d *digest) Reset() {
+	d.crc = Empty
+}
+
+// Write adds the given data to the running checksum.
+func (d *digest) Write(p []byte) (n int, err error) {
+	for i := 0; i < len(p); i++ {
+		d.crc = (d.crc >> 8) ^ d.tab[(int)(byte(d.crc)^p[i])&0xff]
+	}
+
+	return len(p), nil
+}
+
+// Sum64 returns the checksum as a uint64.
+func (d *digest) Sum64() uint64 {
+	return d.crc
+}
+
+// Sum returns the checksum as a byte slice, using the given byte slice.
+func (d *digest) Sum(in []byte) []byte {
+	s := d.Sum64()
+	return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}
+
+// Sum returns the CRC-64 checksum of the data.
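+// Editor's note: this one-shot form is what schema fingerprinting builds on;
+// a minimal sketch, assuming a parsed Schema value s:
+//
+//	fp := crc64.Sum([]byte(s.String()))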
+func Sum(data []byte) [Size]byte { + d := digest{crc: Empty, tab: crc64Table} + d.Reset() + _, _ = d.Write(data) + s := d.Sum64() + //nolint:lll + return [Size]byte{byte(s >> 56), byte(s >> 48), byte(s >> 40), byte(s >> 32), byte(s >> 24), byte(s >> 16), byte(s >> 8), byte(s)} +} diff --git a/vendor/github.com/hamba/avro/v2/protocol.go b/vendor/github.com/hamba/avro/v2/protocol.go new file mode 100644 index 00000000..28608eb8 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/protocol.go @@ -0,0 +1,377 @@ +package avro + +import ( + "crypto/md5" + "encoding/hex" + "errors" + "fmt" + "os" + + jsoniter "github.com/json-iterator/go" + "github.com/mitchellh/mapstructure" +) + +var ( + protocolReserved = []string{"doc", "types", "messages", "protocol", "namespace"} + messageReserved = []string{"doc", "response", "request", "errors", "one-way"} +) + +type protocolConfig struct { + doc string + props map[string]any +} + +// ProtocolOption is a function that sets a protocol option. +type ProtocolOption func(*protocolConfig) + +// WithProtoDoc sets the doc on a protocol. +func WithProtoDoc(doc string) ProtocolOption { + return func(opts *protocolConfig) { + opts.doc = doc + } +} + +// WithProtoProps sets the properties on a protocol. +func WithProtoProps(props map[string]any) ProtocolOption { + return func(opts *protocolConfig) { + opts.props = props + } +} + +// Protocol is an Avro protocol. +type Protocol struct { + name + properties + + types []NamedSchema + messages map[string]*Message + + doc string + + hash string +} + +// NewProtocol creates a protocol instance. +func NewProtocol( + name, namepsace string, + types []NamedSchema, + messages map[string]*Message, + opts ...ProtocolOption, +) (*Protocol, error) { + var cfg protocolConfig + for _, opt := range opts { + opt(&cfg) + } + + n, err := newName(name, namepsace, nil) + if err != nil { + return nil, err + } + + p := &Protocol{ + name: n, + properties: newProperties(cfg.props, protocolReserved), + types: types, + messages: messages, + doc: cfg.doc, + } + + b := md5.Sum([]byte(p.String())) + p.hash = hex.EncodeToString(b[:]) + + return p, nil +} + +// Message returns a message with the given name or nil. +func (p *Protocol) Message(name string) *Message { + return p.messages[name] +} + +// Doc returns the protocol doc. +func (p *Protocol) Doc() string { + return p.doc +} + +// Hash returns the MD5 hash of the protocol. +func (p *Protocol) Hash() string { + return p.hash +} + +// Types returns the types of the protocol. +func (p *Protocol) Types() []NamedSchema { + return p.types +} + +// String returns the canonical form of the protocol. +func (p *Protocol) String() string { + types := "" + for _, f := range p.types { + types += f.String() + "," + } + if len(types) > 0 { + types = types[:len(types)-1] + } + + messages := "" + for k, m := range p.messages { + messages += `"` + k + `":` + m.String() + "," + } + if len(messages) > 0 { + messages = messages[:len(messages)-1] + } + + return `{"protocol":"` + p.Name() + + `","namespace":"` + p.Namespace() + + `","types":[` + types + `],"messages":{` + messages + `}}` +} + +// Message is an Avro protocol message. +type Message struct { + properties + + req *RecordSchema + resp Schema + errs *UnionSchema + oneWay bool + + doc string +} + +// NewMessage creates a protocol message instance. 
+func NewMessage(req *RecordSchema, resp Schema, errors *UnionSchema, oneWay bool, opts ...ProtocolOption) *Message {
+	var cfg protocolConfig
+	for _, opt := range opts {
+		opt(&cfg)
+	}
+
+	return &Message{
+		properties: newProperties(cfg.props, messageReserved),
+		req:        req,
+		resp:       resp,
+		errs:       errors,
+		oneWay:     oneWay,
+		doc:        cfg.doc,
+	}
+}
+
+// Request returns the message request schema.
+func (m *Message) Request() *RecordSchema {
+	return m.req
+}
+
+// Response returns the message response schema.
+func (m *Message) Response() Schema {
+	return m.resp
+}
+
+// Errors returns the message errors union schema.
+func (m *Message) Errors() *UnionSchema {
+	return m.errs
+}
+
+// OneWay reports whether the message is a one-way message.
+func (m *Message) OneWay() bool {
+	return m.oneWay
+}
+
+// Doc returns the message doc.
+func (m *Message) Doc() string {
+	return m.doc
+}
+
+// String returns the canonical form of the message.
+func (m *Message) String() string {
+	fields := ""
+	for _, f := range m.req.fields {
+		fields += f.String() + ","
+	}
+	if len(fields) > 0 {
+		fields = fields[:len(fields)-1]
+	}
+
+	str := `{"request":[` + fields + `]`
+	if m.resp != nil {
+		str += `,"response":` + m.resp.String()
+	}
+	if m.errs != nil && len(m.errs.Types()) > 1 {
+		errs, _ := NewUnionSchema(m.errs.Types()[1:])
+		str += `,"errors":` + errs.String()
+	}
+	str += "}"
+	return str
+}
+
+// ParseProtocolFile parses an Avro protocol from a file.
+func ParseProtocolFile(path string) (*Protocol, error) {
+	s, err := os.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return ParseProtocol(string(s))
+}
+
+// MustParseProtocol parses an Avro protocol, panicking if there is an error.
+func MustParseProtocol(protocol string) *Protocol {
+	parsed, err := ParseProtocol(protocol)
+	if err != nil {
+		panic(err)
+	}
+
+	return parsed
+}
+
+// ParseProtocol parses an Avro protocol.
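+// Editor's note: a minimal, hypothetical usage sketch:
+//
+//	p, err := avro.ParseProtocol(`{"protocol":"Echo","namespace":"org.example","messages":{}}`)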
+func ParseProtocol(protocol string) (*Protocol, error) { + cache := &SchemaCache{} + + var m map[string]any + if err := jsoniter.Unmarshal([]byte(protocol), &m); err != nil { + return nil, err + } + + seen := seenCache{} + return parseProtocol(m, seen, cache) +} + +type protocol struct { + Protocol string `mapstructure:"protocol"` + Namespace string `mapstructure:"namespace"` + Doc string `mapstructure:"doc"` + Types []any `mapstructure:"types"` + Messages map[string]map[string]any `mapstructure:"messages"` + Props map[string]any `mapstructure:",remain"` +} + +func parseProtocol(m map[string]any, seen seenCache, cache *SchemaCache) (*Protocol, error) { + var ( + p protocol + meta mapstructure.Metadata + ) + if err := decodeMap(m, &p, &meta); err != nil { + return nil, fmt.Errorf("avro: error decoding protocol: %w", err) + } + + if err := checkParsedName(p.Protocol, p.Namespace, hasKey(meta.Keys, "namespace")); err != nil { + return nil, err + } + + var ( + types []NamedSchema + err error + ) + if len(p.Types) > 0 { + types, err = parseProtocolTypes(p.Namespace, p.Types, seen, cache) + if err != nil { + return nil, err + } + } + + messages := map[string]*Message{} + if len(p.Messages) > 0 { + for k, msg := range p.Messages { + message, err := parseMessage(p.Namespace, msg, seen, cache) + if err != nil { + return nil, err + } + + messages[k] = message + } + } + + return NewProtocol(p.Protocol, p.Namespace, types, messages, WithProtoDoc(p.Doc), WithProtoProps(p.Props)) +} + +func parseProtocolTypes(namespace string, types []any, seen seenCache, cache *SchemaCache) ([]NamedSchema, error) { + ts := make([]NamedSchema, len(types)) + for i, typ := range types { + schema, err := parseType(namespace, typ, seen, cache) + if err != nil { + return nil, err + } + + namedSchema, ok := schema.(NamedSchema) + if !ok { + return nil, errors.New("avro: protocol types must be named schemas") + } + + ts[i] = namedSchema + } + + return ts, nil +} + +type message struct { + Doc string `mapstructure:"doc"` + Request []map[string]any `mapstructure:"request"` + Response any `mapstructure:"response"` + Errors []any `mapstructure:"errors"` + OneWay bool `mapstructure:"one-way"` + Props map[string]any `mapstructure:",remain"` +} + +func parseMessage(namespace string, m map[string]any, seen seenCache, cache *SchemaCache) (*Message, error) { + var ( + msg message + meta mapstructure.Metadata + ) + if err := decodeMap(m, &msg, &meta); err != nil { + return nil, fmt.Errorf("avro: error decoding message: %w", err) + } + + fields := make([]*Field, len(msg.Request)) + for i, f := range msg.Request { + field, err := parseField(namespace, f, seen, cache) + if err != nil { + return nil, err + } + fields[i] = field + } + request := &RecordSchema{ + name: name{}, + properties: properties{}, + fields: fields, + } + + var response Schema + if msg.Response != nil { + schema, err := parseType(namespace, msg.Response, seen, cache) + if err != nil { + return nil, err + } + + if schema.Type() != Null { + response = schema + } + } + + types := []Schema{NewPrimitiveSchema(String, nil)} + if len(msg.Errors) > 0 { + for _, e := range msg.Errors { + schema, err := parseType(namespace, e, seen, cache) + if err != nil { + return nil, err + } + + if rec, ok := schema.(*RecordSchema); ok && !rec.IsError() { + return nil, errors.New("avro: errors record schema must be of type error") + } + + types = append(types, schema) + } + } + errs, err := NewUnionSchema(types) + if err != nil { + return nil, err + } + + oneWay := msg.OneWay + if 
hasKey(meta.Keys, "one-way") && oneWay && (len(errs.Types()) > 1 || response != nil) {
+		return nil, errors.New("avro: one-way messages cannot have a response or errors")
+	}
+	if !oneWay && len(errs.Types()) <= 1 && response == nil {
+		oneWay = true
+	}
+
+	return NewMessage(request, response, errs, oneWay, WithProtoDoc(msg.Doc), WithProtoProps(msg.Props)), nil
+}
diff --git a/vendor/github.com/hamba/avro/v2/reader.go b/vendor/github.com/hamba/avro/v2/reader.go
new file mode 100644
index 00000000..0f34cb0a
--- /dev/null
+++ b/vendor/github.com/hamba/avro/v2/reader.go
@@ -0,0 +1,324 @@
+package avro
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"unsafe"
+)
+
+const (
+	maxIntBufSize  = 5
+	maxLongBufSize = 10
+)
+
+// ReaderFunc is a function used to customize the Reader.
+type ReaderFunc func(r *Reader)
+
+// WithReaderConfig specifies the configuration to use with a reader.
+func WithReaderConfig(cfg API) ReaderFunc {
+	return func(r *Reader) {
+		r.cfg = cfg.(*frozenConfig)
+	}
+}
+
+// Reader is an Avro specific io.Reader.
+type Reader struct {
+	cfg    *frozenConfig
+	reader io.Reader
+	slab   []byte
+	buf    []byte
+	head   int
+	tail   int
+	Error  error
+}
+
+// NewReader creates a new Reader.
+func NewReader(r io.Reader, bufSize int, opts ...ReaderFunc) *Reader {
+	reader := &Reader{
+		cfg:    DefaultConfig.(*frozenConfig),
+		reader: r,
+		buf:    make([]byte, bufSize),
+		head:   0,
+		tail:   0,
+	}
+
+	for _, opt := range opts {
+		opt(reader)
+	}
+
+	return reader
+}
+
+// Reset resets a Reader with a new byte array attached.
+func (r *Reader) Reset(b []byte) *Reader {
+	r.reader = nil
+	r.buf = b
+	r.head = 0
+	r.tail = len(b)
+	return r
+}
+
+// ReportError records an error in the Reader with the given operation and message.
+func (r *Reader) ReportError(operation, msg string) {
+	if r.Error != nil && !errors.Is(r.Error, io.EOF) {
+		return
+	}
+
+	r.Error = fmt.Errorf("avro: %s: %s", operation, msg)
+}
+
+func (r *Reader) loadMore() bool {
+	if r.reader == nil {
+		if r.Error == nil {
+			r.head = r.tail
+			r.Error = io.EOF
+		}
+		return false
+	}
+
+	for {
+		n, err := r.reader.Read(r.buf)
+		if n == 0 {
+			if err != nil {
+				if r.Error == nil {
+					r.Error = err
+				}
+				return false
+			}
+			continue
+		}
+
+		r.head = 0
+		r.tail = n
+		return true
+	}
+}
+
+func (r *Reader) readByte() byte {
+	if r.head == r.tail {
+		if !r.loadMore() {
+			r.Error = io.ErrUnexpectedEOF
+			return 0
+		}
+	}
+
+	b := r.buf[r.head]
+	r.head++
+
+	return b
+}
+
+// Peek returns the next byte in the buffer.
+// The Reader Error will be io.EOF if no next byte exists.
+func (r *Reader) Peek() byte {
+	if r.head == r.tail {
+		if !r.loadMore() {
+			return 0
+		}
+	}
+	return r.buf[r.head]
+}
+
+// Read reads data into the given bytes.
+func (r *Reader) Read(b []byte) {
+	size := len(b)
+	read := 0
+
+	for read < size {
+		if r.head == r.tail {
+			if !r.loadMore() {
+				r.Error = io.ErrUnexpectedEOF
+				return
+			}
+		}
+
+		n := copy(b[read:], r.buf[r.head:r.tail])
+		r.head += n
+		read += n
+	}
+}
+
+// ReadBool reads a Bool from the Reader.
+func (r *Reader) ReadBool() bool {
+	b := r.readByte()
+
+	if b != 0 && b != 1 {
+		r.ReportError("ReadBool", "invalid bool")
+	}
+	return b == 1
+}
+
+// ReadInt reads an Int from the Reader.
+//
+//nolint:dupl
+func (r *Reader) ReadInt() int32 {
+	if r.Error != nil {
+		return 0
+	}
+
+	var (
+		n int
+		v uint32
+		s uint8
+	)
+
+	for {
+		tail := r.tail
+		if r.tail-r.head+n > maxIntBufSize {
+			tail = r.head + maxIntBufSize - n
+		}
+
+		// Consume what is in the buffer.
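+		// Editor's note: Avro ints are zigzag-encoded varints: each byte
+		// carries 7 payload bits (least significant group first) and the
+		// high bit marks continuation. The `(v >> 1) ^ -(v & 1)` below
+		// undoes the zigzag mapping, e.g. encoded 4 decodes to 2 and
+		// encoded 3 decodes to -2.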
+ var i int + for _, b := range r.buf[r.head:tail] { + v |= uint32(b&0x7f) << s + if b&0x80 == 0 { + r.head += i + 1 + return int32((v >> 1) ^ -(v & 1)) + } + s += 7 + i++ + } + if n >= maxIntBufSize { + r.ReportError("ReadInt", "int overflow") + return 0 + } + r.head += i + n += i + + // We ran out of buffer and are not at the end of the int, + // Read more into the buffer. + if !r.loadMore() { + r.Error = fmt.Errorf("reading int: %w", r.Error) + return 0 + } + } +} + +// ReadLong reads a Long from the Reader. +// +//nolint:dupl +func (r *Reader) ReadLong() int64 { + if r.Error != nil { + return 0 + } + + var ( + n int + v uint64 + s uint8 + ) + + for { + tail := r.tail + if r.tail-r.head+n > maxLongBufSize { + tail = r.head + maxLongBufSize - n + } + + // Consume what it is in the buffer. + var i int + for _, b := range r.buf[r.head:tail] { + v |= uint64(b&0x7f) << s + if b&0x80 == 0 { + r.head += i + 1 + return int64((v >> 1) ^ -(v & 1)) + } + s += 7 + i++ + } + if n >= maxLongBufSize { + r.ReportError("ReadLong", "int overflow") + return 0 + } + r.head += i + n += i + + // We ran out of buffer and are not at the end of the long, + // Read more into the buffer. + if !r.loadMore() { + r.Error = fmt.Errorf("reading long: %w", r.Error) + return 0 + } + } +} + +// ReadFloat reads a Float from the Reader. +func (r *Reader) ReadFloat() float32 { + var buf [4]byte + r.Read(buf[:]) + + float := *(*float32)(unsafe.Pointer(&buf[0])) + return float +} + +// ReadDouble reads a Double from the Reader. +func (r *Reader) ReadDouble() float64 { + var buf [8]byte + r.Read(buf[:]) + + float := *(*float64)(unsafe.Pointer(&buf[0])) + return float +} + +// ReadBytes reads Bytes from the Reader. +func (r *Reader) ReadBytes() []byte { + return r.readBytes("bytes") +} + +// ReadString reads a String from the Reader. +func (r *Reader) ReadString() string { + b := r.readBytes("string") + if len(b) == 0 { + return "" + } + + return *(*string)(unsafe.Pointer(&b)) +} + +func (r *Reader) readBytes(op string) []byte { + size := int(r.ReadLong()) + if size < 0 { + fnName := "Read" + strings.ToTitle(op) + r.ReportError(fnName, "invalid "+op+" length") + return nil + } + if size == 0 { + return []byte{} + } + if max := r.cfg.getMaxByteSliceSize(); max > 0 && size > max { + fnName := "Read" + strings.ToTitle(op) + r.ReportError(fnName, "size is greater than `Config.MaxByteSliceSize`") + return nil + } + + // The bytes are entirely in the buffer and of a reasonable size. + // Use the byte slab. + if r.head+size <= r.tail && size <= 1024 { + if cap(r.slab) < size { + r.slab = make([]byte, 1024) + } + dst := r.slab[:size] + r.slab = r.slab[size:] + copy(dst, r.buf[r.head:r.head+size]) + r.head += size + return dst + } + + buf := make([]byte, size) + r.Read(buf) + return buf +} + +// ReadBlockHeader reads a Block Header from the Reader. +func (r *Reader) ReadBlockHeader() (int64, int64) { + length := r.ReadLong() + if length < 0 { + size := r.ReadLong() + + return -length, size + } + + return length, 0 +} diff --git a/vendor/github.com/hamba/avro/v2/reader_generic.go b/vendor/github.com/hamba/avro/v2/reader_generic.go new file mode 100644 index 00000000..b75d240e --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/reader_generic.go @@ -0,0 +1,163 @@ +package avro + +import ( + "fmt" + "reflect" + "time" +) + +// ReadNext reads the next Avro element as a generic interface. 
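+// Logical types are mapped to Go values where recognised: date and
+// timestamp-millis/micros become time.Time, time-millis/micros become
+// time.Duration, and decimal becomes *big.Rat; a non-null union value is
+// wrapped in a map keyed by its type name.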
+func (r *Reader) ReadNext(schema Schema) any { + var ls LogicalSchema + lts, ok := schema.(LogicalTypeSchema) + if ok { + ls = lts.Logical() + } + + switch schema.Type() { + case Boolean: + return r.ReadBool() + case Int: + if ls != nil { + switch ls.Type() { + case Date: + i := r.ReadInt() + sec := int64(i) * int64(24*time.Hour/time.Second) + return time.Unix(sec, 0).UTC() + + case TimeMillis: + return time.Duration(r.ReadInt()) * time.Millisecond + } + } + return int(r.ReadInt()) + case Long: + if ls != nil { + switch ls.Type() { + case TimeMicros: + return time.Duration(r.ReadLong()) * time.Microsecond + + case TimestampMillis: + i := r.ReadLong() + sec := i / 1e3 + nsec := (i - sec*1e3) * 1e6 + return time.Unix(sec, nsec).UTC() + + case TimestampMicros: + i := r.ReadLong() + sec := i / 1e6 + nsec := (i - sec*1e6) * 1e3 + return time.Unix(sec, nsec).UTC() + } + } + return r.ReadLong() + case Float: + return r.ReadFloat() + case Double: + return r.ReadDouble() + case String: + return r.ReadString() + case Bytes: + if ls != nil && ls.Type() == Decimal { + dec := ls.(*DecimalLogicalSchema) + return ratFromBytes(r.ReadBytes(), dec.Scale()) + } + return r.ReadBytes() + case Record: + fields := schema.(*RecordSchema).Fields() + obj := make(map[string]any, len(fields)) + for _, field := range fields { + obj[field.Name()] = r.ReadNext(field.Type()) + } + return obj + case Ref: + return r.ReadNext(schema.(*RefSchema).Schema()) + case Enum: + symbols := schema.(*EnumSchema).Symbols() + idx := int(r.ReadInt()) + if idx < 0 || idx >= len(symbols) { + r.ReportError("Read", "unknown enum symbol") + return nil + } + return symbols[idx] + case Array: + arr := []any{} + r.ReadArrayCB(func(r *Reader) bool { + elem := r.ReadNext(schema.(*ArraySchema).Items()) + arr = append(arr, elem) + return true + }) + return arr + case Map: + obj := map[string]any{} + r.ReadMapCB(func(r *Reader, field string) bool { + elem := r.ReadNext(schema.(*MapSchema).Values()) + obj[field] = elem + return true + }) + return obj + case Union: + types := schema.(*UnionSchema).Types() + idx := int(r.ReadLong()) + if idx < 0 || idx > len(types)-1 { + r.ReportError("Read", "unknown union type") + return nil + } + schema = types[idx] + if schema.Type() == Null { + return nil + } + + key := schemaTypeName(schema) + obj := map[string]any{} + obj[key] = r.ReadNext(types[idx]) + return obj + case Fixed: + size := schema.(*FixedSchema).Size() + obj := make([]byte, size) + r.Read(obj) + if ls != nil && ls.Type() == Decimal { + dec := ls.(*DecimalLogicalSchema) + return ratFromBytes(obj, dec.Scale()) + } + return byteSliceToArray(obj, size) + default: + r.ReportError("Read", fmt.Sprintf("unexpected schema type: %v", schema.Type())) + return nil + } +} + +// ReadArrayCB reads an array with a callback per item. +func (r *Reader) ReadArrayCB(fn func(*Reader) bool) { + for { + l, _ := r.ReadBlockHeader() + if l == 0 { + break + } + for i := 0; i < int(l); i++ { + fn(r) + } + } +} + +// ReadMapCB reads an array with a callback per item. 
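+// More precisely, it reads a map block by block, passing each entry's string
+// key to fn, which is expected to consume the entry's value from the reader.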
+func (r *Reader) ReadMapCB(fn func(*Reader, string) bool) { + for { + l, _ := r.ReadBlockHeader() + if l == 0 { + break + } + + for i := 0; i < int(l); i++ { + field := r.ReadString() + fn(r, field) + } + } +} + +var byteType = reflect.TypeOf((*byte)(nil)).Elem() + +func byteSliceToArray(b []byte, size int) any { + vArr := reflect.New(reflect.ArrayOf(size, byteType)).Elem() + reflect.Copy(vArr, reflect.ValueOf(b)) + return vArr.Interface() +} diff --git a/vendor/github.com/hamba/avro/v2/reader_skip.go b/vendor/github.com/hamba/avro/v2/reader_skip.go new file mode 100644 index 00000000..94288c8c --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/reader_skip.go @@ -0,0 +1,79 @@ +package avro + +// SkipNBytes skips the given number of bytes in the reader. +func (r *Reader) SkipNBytes(n int) { + read := 0 + for read < n { + if r.head == r.tail { + if !r.loadMore() { + return + } + } + + if read+r.tail-r.head < n { + read += r.tail - r.head + r.head = r.tail + continue + } + + r.head += n - read + read += n - read + } +} + +// SkipBool skips a Bool in the reader. +func (r *Reader) SkipBool() { + _ = r.readByte() +} + +// SkipInt skips an Int in the reader. +func (r *Reader) SkipInt() { + var n int + for r.Error == nil && n < maxIntBufSize { + b := r.readByte() + if b&0x80 == 0 { + break + } + n++ + } +} + +// SkipLong skips a Long in the reader. +func (r *Reader) SkipLong() { + var n int + for r.Error == nil && n < maxLongBufSize { + b := r.readByte() + if b&0x80 == 0 { + break + } + n++ + } +} + +// SkipFloat skips a Float in the reader. +func (r *Reader) SkipFloat() { + r.SkipNBytes(4) +} + +// SkipDouble skips a Double in the reader. +func (r *Reader) SkipDouble() { + r.SkipNBytes(8) +} + +// SkipString skips a String in the reader. +func (r *Reader) SkipString() { + size := r.ReadLong() + if size <= 0 { + return + } + r.SkipNBytes(int(size)) +} + +// SkipBytes skips Bytes in the reader. +func (r *Reader) SkipBytes() { + size := r.ReadLong() + if size <= 0 { + return + } + r.SkipNBytes(int(size)) +} diff --git a/vendor/github.com/hamba/avro/v2/resolver.go b/vendor/github.com/hamba/avro/v2/resolver.go new file mode 100644 index 00000000..c1b6ab65 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/resolver.go @@ -0,0 +1,90 @@ +package avro + +import ( + "fmt" + "math/big" + "sync" + "time" + + "github.com/modern-go/reflect2" +) + +// TypeResolver resolves types by name. +type TypeResolver struct { + names sync.Map // map[string]reflect2.Type + types sync.Map // map[int][]string +} + +// NewTypeResolver creates a new type resolver with all primitive types +// registered. 
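+// Editor's note: the same mapping backs the package-level Register below,
+// which callers use so that union values can be resolved to concrete Go
+// types; a hypothetical sketch:
+//
+//	avro.Register("org.example.Event", Event{})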
+func NewTypeResolver() *TypeResolver { + r := &TypeResolver{} + + // Register basic types + r.Register(string(Null), &null{}) + r.Register(string(Int), int8(0)) + r.Register(string(Int), int16(0)) + r.Register(string(Int), int32(0)) + r.Register(string(Int), int(0)) + r.Register(string(Long), int(0)) + r.Register(string(Long), int64(0)) + r.Register(string(Float), float32(0)) + r.Register(string(Double), float64(0)) + r.Register(string(String), "") + r.Register(string(Bytes), []byte{}) + r.Register(string(Boolean), true) + + // Register logical types + r.Register(string(Int)+"."+string(Date), time.Time{}) + r.Register(string(Int)+"."+string(TimeMillis), time.Duration(0)) + r.Register(string(Long)+"."+string(TimestampMillis), time.Time{}) + r.Register(string(Long)+"."+string(TimestampMicros), time.Time{}) + r.Register(string(Long)+"."+string(TimeMicros), time.Duration(0)) + r.Register(string(Bytes)+"."+string(Decimal), big.NewRat(1, 1)) + r.Register(string(String)+"."+string(UUID), "") + + return r +} + +// Register registers names to their types for resolution. +func (r *TypeResolver) Register(name string, obj any) { + typ := reflect2.TypeOf(obj) + rtype := typ.RType() + + r.names.Store(name, typ) + + raw, ok := r.types.LoadOrStore(rtype, []string{name}) + if !ok { + return + } + names := raw.([]string) + names = append(names, name) + r.types.Store(rtype, names) +} + +// Name gets the name for a type, or an error. +func (r *TypeResolver) Name(typ reflect2.Type) ([]string, error) { + rtype := typ.RType() + + names, ok := r.types.Load(rtype) + if !ok { + return nil, fmt.Errorf("avro: unable to resolve type %s", typ.String()) + } + + return names.([]string), nil +} + +// Type gets the type for a name, or an error. +func (r *TypeResolver) Type(name string) (reflect2.Type, error) { + typ, ok := r.names.Load(name) + if !ok { + return nil, fmt.Errorf("avro: unable to resolve type with name %s", name) + } + + return typ.(reflect2.Type), nil +} + +// Register registers names to their types for resolution. All primitive types are pre-registered. +func Register(name string, obj any) { + DefaultConfig.Register(name, obj) +} diff --git a/vendor/github.com/hamba/avro/v2/schema.go b/vendor/github.com/hamba/avro/v2/schema.go new file mode 100644 index 00000000..aa3b1c5a --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/schema.go @@ -0,0 +1,1733 @@ +package avro + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "errors" + "fmt" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/hamba/avro/v2/pkg/crc64" + jsoniter "github.com/json-iterator/go" +) + +var nullDefault = struct{}{} + +var ( + schemaReserved = []string{ + "doc", "fields", "items", "name", "namespace", "size", "symbols", + "values", "type", "aliases", "logicalType", "precision", "scale", + } + fieldReserved = []string{"default", "doc", "name", "order", "type", "aliases"} +) + +// Type is a schema type. +type Type string + +// Schema type constants. +const ( + Record Type = "record" + Error Type = "error" + Ref Type = "" + Enum Type = "enum" + Array Type = "array" + Map Type = "map" + Union Type = "union" + Fixed Type = "fixed" + String Type = "string" + Bytes Type = "bytes" + Int Type = "int" + Long Type = "long" + Float Type = "float" + Double Type = "double" + Boolean Type = "boolean" + Null Type = "null" +) + +// Order is a field order. +type Order string + +// Field orders. +const ( + Asc Order = "ascending" + Desc Order = "descending" + Ignore Order = "ignore" +) + +// LogicalType is a schema logical type. 
+type LogicalType string + +// Schema logical type constants. +const ( + Decimal LogicalType = "decimal" + UUID LogicalType = "uuid" + Date LogicalType = "date" + TimeMillis LogicalType = "time-millis" + TimeMicros LogicalType = "time-micros" + TimestampMillis LogicalType = "timestamp-millis" + TimestampMicros LogicalType = "timestamp-micros" + LocalTimestampMillis LogicalType = "local-timestamp-millis" + LocalTimestampMicros LogicalType = "local-timestamp-micros" + Duration LogicalType = "duration" +) + +// Action is a field action used during decoding process. +type Action string + +// Action type constants. +const ( + FieldIgnore Action = "ignore" + FieldSetDefault Action = "set_default" +) + +// FingerprintType is a fingerprinting algorithm. +type FingerprintType string + +// Fingerprint type constants. +const ( + CRC64Avro FingerprintType = "CRC64-AVRO" + MD5 FingerprintType = "MD5" + SHA256 FingerprintType = "SHA256" +) + +// SchemaCache is a cache of schemas. +type SchemaCache struct { + cache sync.Map // map[string]Schema +} + +// Add adds a schema to the cache with the given name. +func (c *SchemaCache) Add(name string, schema Schema) { + c.cache.Store(name, schema) +} + +// Get returns the Schema if it exists. +func (c *SchemaCache) Get(name string) Schema { + if v, ok := c.cache.Load(name); ok { + return v.(Schema) + } + + return nil +} + +// Schemas is a slice of Schemas. +type Schemas []Schema + +// Get gets a schema and position by type or name if it is a named schema. +func (s Schemas) Get(name string) (Schema, int) { + for i, schema := range s { + if schemaTypeName(schema) == name { + return schema, i + } + } + + return nil, -1 +} + +// Schema represents an Avro schema. +type Schema interface { + // Type returns the type of the schema. + Type() Type + + // String returns the canonical form of the schema. + String() string + + // Fingerprint returns the SHA256 fingerprint of the schema. + Fingerprint() [32]byte + + // FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error. + FingerprintUsing(FingerprintType) ([]byte, error) + + // CacheFingerprint returns the unique identity of the schema. + // This returns a unique identity for schemas resolved from a writer schema, otherwise it returns + // the schemas Fingerprint. + CacheFingerprint() [32]byte +} + +// LogicalSchema represents an Avro schema with a logical type. +type LogicalSchema interface { + // Type returns the type of the logical schema. + Type() LogicalType + + // String returns the canonical form of the logical schema. + String() string +} + +// PropertySchema represents a schema with properties. +type PropertySchema interface { + // Prop gets a property from the schema. + Prop(string) any +} + +// NamedSchema represents a schema with a name. +type NamedSchema interface { + Schema + PropertySchema + + // Name returns the name of the schema. + Name() string + + // Namespace returns the namespace of a schema. + Namespace() string + + // FullName returns the full qualified name of a schema. + FullName() string + + // Aliases returns the full qualified aliases of a schema. + Aliases() []string +} + +// LogicalTypeSchema represents a schema that can contain a logical type. +type LogicalTypeSchema interface { + // Logical returns the logical schema or nil. 
+	Logical() LogicalSchema
+}
+
+type name struct {
+	name      string
+	namespace string
+	full      string
+	aliases   []string
+}
+
+func newName(n, ns string, aliases []string) (name, error) {
+	if idx := strings.LastIndexByte(n, '.'); idx > -1 {
+		ns = n[:idx]
+		n = n[idx+1:]
+	}
+
+	full := n
+	if ns != "" {
+		full = ns + "." + n
+	}
+
+	for _, part := range strings.Split(full, ".") {
+		if err := validateName(part); err != nil {
+			return name{}, fmt.Errorf("avro: invalid name part %q in name %q: %w", part, full, err)
+		}
+	}
+
+	a := make([]string, 0, len(aliases))
+	for _, alias := range aliases {
+		if !strings.Contains(alias, ".") {
+			if err := validateName(alias); err != nil {
+				return name{}, fmt.Errorf("avro: invalid name %q: %w", alias, err)
+			}
+			if ns == "" {
+				a = append(a, alias)
+				continue
+			}
+			a = append(a, ns+"."+alias)
+			continue
+		}
+
+		for _, part := range strings.Split(alias, ".") {
+			if err := validateName(part); err != nil {
+				return name{}, fmt.Errorf("avro: invalid name part %q in name %q: %w", part, alias, err)
+			}
+		}
+		a = append(a, alias)
+	}
+
+	return name{
+		name:      n,
+		namespace: ns,
+		full:      full,
+		aliases:   a,
+	}, nil
+}
+
+// Name returns the name of a schema.
+func (n name) Name() string {
+	return n.name
+}
+
+// Namespace returns the namespace of a schema.
+func (n name) Namespace() string {
+	return n.namespace
+}
+
+// FullName returns the fully qualified name of a schema.
+func (n name) FullName() string {
+	return n.full
+}
+
+// Aliases returns the fully qualified aliases of a schema.
+func (n name) Aliases() []string {
+	return n.aliases
+}
+
+type fingerprinter struct {
+	fingerprint atomic.Value // [32]byte
+	cache       sync.Map     // map[FingerprintType][]byte
+}
+
+// Fingerprint returns the SHA256 fingerprint of the schema.
+func (f *fingerprinter) Fingerprint(stringer fmt.Stringer) [32]byte {
+	if v := f.fingerprint.Load(); v != nil {
+		return v.([32]byte)
+	}
+
+	fingerprint := sha256.Sum256([]byte(stringer.String()))
+	f.fingerprint.Store(fingerprint)
+	return fingerprint
+}
+
+// FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error.
+func (f *fingerprinter) FingerprintUsing(typ FingerprintType, stringer fmt.Stringer) ([]byte, error) {
+	if v, ok := f.cache.Load(typ); ok {
+		return v.([]byte), nil
+	}
+
+	data := []byte(stringer.String())
+
+	var fingerprint []byte
+	switch typ {
+	case CRC64Avro:
+		h := crc64.Sum(data)
+		fingerprint = h[:]
+	case MD5:
+		h := md5.Sum(data)
+		fingerprint = h[:]
+	case SHA256:
+		h := sha256.Sum256(data)
+		fingerprint = h[:]
+	default:
+		return nil, fmt.Errorf("avro: unknown fingerprint algorithm %s", typ)
+	}
+
+	f.cache.Store(typ, fingerprint)
+	return fingerprint, nil
+}
+
+type cacheFingerprinter struct {
+	writerFingerprint *[32]byte
+
+	cache atomic.Value // [32]byte
+}
+
+// CacheFingerprint returns the SHA256 identity of the schema.
+func (i *cacheFingerprinter) CacheFingerprint(schema Schema, fn func() []byte) [32]byte {
+	if v := i.cache.Load(); v != nil {
+		return v.([32]byte)
+	}
+
+	if i.writerFingerprint == nil {
+		fp := schema.Fingerprint()
+		i.cache.Store(fp)
+		return fp
+	}
+
+	fp := schema.Fingerprint()
+	d := append([]byte{}, fp[:]...)
+	d = append(d, (*i.writerFingerprint)[:]...)
+	if fn != nil {
+		d = append(d, fn()...)
+ } + ident := sha256.Sum256(d) + i.cache.Store(ident) + return ident +} + +type properties struct { + props map[string]any +} + +func newProperties(props map[string]any, res []string) properties { + p := properties{props: map[string]any{}} + for k, v := range props { + if isReserved(res, k) { + continue + } + p.props[k] = v + } + return p +} + +func isReserved(res []string, k string) bool { + for _, r := range res { + if k == r { + return true + } + } + return false +} + +// Prop gets a property from the schema. +func (p properties) Prop(name string) any { + if p.props == nil { + return nil + } + + return p.props[name] +} + +func (p properties) marshalPropertiesToJSON(buf *bytes.Buffer) error { + sortedPropertyKeys := make([]string, 0, len(p.props)) + for k := range p.props { + sortedPropertyKeys = append(sortedPropertyKeys, k) + } + sort.Strings(sortedPropertyKeys) + for _, k := range sortedPropertyKeys { + vv, err := jsoniter.Marshal(p.props[k]) + if err != nil { + return err + } + kk, err := jsoniter.Marshal(k) + if err != nil { + return err + } + buf.WriteString(`,`) + buf.Write(kk) + buf.WriteString(`:`) + buf.Write(vv) + } + return nil +} + +type schemaConfig struct { + aliases []string + doc string + def any + order Order + props map[string]any + wfp *[32]byte +} + +// SchemaOption is a function that sets a schema option. +type SchemaOption func(*schemaConfig) + +// WithAliases sets the aliases on a schema. +func WithAliases(aliases []string) SchemaOption { + return func(opts *schemaConfig) { + opts.aliases = aliases + } +} + +// WithDoc sets the doc on a schema. +func WithDoc(doc string) SchemaOption { + return func(opts *schemaConfig) { + opts.doc = doc + } +} + +// WithDefault sets the default on a schema. +func WithDefault(def any) SchemaOption { + return func(opts *schemaConfig) { + opts.def = def + } +} + +// WithOrder sets the order on a schema. +func WithOrder(order Order) SchemaOption { + return func(opts *schemaConfig) { + opts.order = order + } +} + +// WithProps sets the properties on a schema. +func WithProps(props map[string]any) SchemaOption { + return func(opts *schemaConfig) { + opts.props = props + } +} + +func withWriterFingerprint(fp [32]byte) SchemaOption { + return func(opts *schemaConfig) { + opts.wfp = &fp + } +} + +func withWriterFingerprintIfResolved(fp [32]byte, resolved bool) SchemaOption { + return func(opts *schemaConfig) { + if resolved { + opts.wfp = &fp + } + } +} + +// PrimitiveSchema is an Avro primitive type schema. +type PrimitiveSchema struct { + properties + fingerprinter + cacheFingerprinter + + typ Type + logical LogicalSchema + + // encodedType is the type of the encoded value, if it is different from the typ. + // It's only used in the context of write-read schema resolution. + encodedType Type +} + +// NewPrimitiveSchema creates a new PrimitiveSchema. +func NewPrimitiveSchema(t Type, l LogicalSchema, opts ...SchemaOption) *PrimitiveSchema { + var cfg schemaConfig + for _, opt := range opts { + opt(&cfg) + } + + return &PrimitiveSchema{ + properties: newProperties(cfg.props, schemaReserved), + cacheFingerprinter: cacheFingerprinter{writerFingerprint: cfg.wfp}, + typ: t, + logical: l, + } +} + +// Type returns the type of the schema. +func (s *PrimitiveSchema) Type() Type { + return s.typ +} + +// Logical returns the logical schema or nil. +func (s *PrimitiveSchema) Logical() LogicalSchema { + return s.logical +} + +// String returns the canonical form of the schema. 
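+//
+// For example, a plain long renders as "long", while a long carrying the
+// timestamp-millis logical type renders as
+// {"type":"long","logicalType":"timestamp-millis"}.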
+func (s *PrimitiveSchema) String() string {
+	if s.logical == nil {
+		return `"` + string(s.typ) + `"`
+	}
+
+	return `{"type":"` + string(s.typ) + `",` + s.logical.String() + `}`
+}
+
+// MarshalJSON marshals the schema to json.
+func (s *PrimitiveSchema) MarshalJSON() ([]byte, error) {
+	if s.logical == nil && len(s.props) == 0 {
+		return jsoniter.Marshal(s.typ)
+	}
+
+	buf := new(bytes.Buffer)
+	buf.WriteString(`{"type":"` + string(s.typ) + `"`)
+	if s.logical != nil {
+		buf.WriteString(`,"logicalType":"` + string(s.logical.Type()) + `"`)
+		if d, ok := s.logical.(*DecimalLogicalSchema); ok {
+			buf.WriteString(`,"precision":` + strconv.Itoa(d.prec))
+			if d.scale > 0 {
+				buf.WriteString(`,"scale":` + strconv.Itoa(d.scale))
+			}
+		}
+	}
+	if err := s.marshalPropertiesToJSON(buf); err != nil {
+		return nil, err
+	}
+	buf.WriteString("}")
+	return buf.Bytes(), nil
+}
+
+// Fingerprint returns the SHA256 fingerprint of the schema.
+func (s *PrimitiveSchema) Fingerprint() [32]byte {
+	return s.fingerprinter.Fingerprint(s)
+}
+
+// FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error.
+func (s *PrimitiveSchema) FingerprintUsing(typ FingerprintType) ([]byte, error) {
+	return s.fingerprinter.FingerprintUsing(typ, s)
+}
+
+// CacheFingerprint returns unique identity of the schema.
+func (s *PrimitiveSchema) CacheFingerprint() [32]byte {
+	return s.cacheFingerprinter.CacheFingerprint(s, nil)
+}
+
+// RecordSchema is an Avro record type schema.
+type RecordSchema struct {
+	name
+	properties
+	fingerprinter
+	cacheFingerprinter
+	isError bool
+	fields  []*Field
+	doc     string
+}
+
+// NewRecordSchema creates a new record schema instance.
+func NewRecordSchema(name, namespace string, fields []*Field, opts ...SchemaOption) (*RecordSchema, error) {
+	var cfg schemaConfig
+	for _, opt := range opts {
+		opt(&cfg)
+	}
+
+	n, err := newName(name, namespace, cfg.aliases)
+	if err != nil {
+		return nil, err
+	}
+
+	return &RecordSchema{
+		name:               n,
+		properties:         newProperties(cfg.props, schemaReserved),
+		cacheFingerprinter: cacheFingerprinter{writerFingerprint: cfg.wfp},
+		fields:             fields,
+		doc:                cfg.doc,
+	}, nil
+}
+
+// NewErrorRecordSchema creates a new error record schema instance.
+func NewErrorRecordSchema(name, namespace string, fields []*Field, opts ...SchemaOption) (*RecordSchema, error) {
+	rec, err := NewRecordSchema(name, namespace, fields, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	rec.isError = true
+
+	return rec, nil
+}
+
+// Type returns the type of the schema.
+func (s *RecordSchema) Type() Type {
+	return Record
+}
+
+// Doc returns the documentation of a record.
+func (s *RecordSchema) Doc() string {
+	return s.doc
+}
+
+// IsError determines if this is an error record.
+func (s *RecordSchema) IsError() bool {
+	return s.isError
+}
+
+// Fields returns the fields of a record.
+func (s *RecordSchema) Fields() []*Field {
+	return s.fields
+}
+
+// String returns the canonical form of the schema.
+func (s *RecordSchema) String() string {
+	typ := "record"
+	if s.isError {
+		typ = "error"
+	}
+
+	fields := ""
+	for _, f := range s.fields {
+		fields += f.String() + ","
+	}
+	if len(fields) > 0 {
+		fields = fields[:len(fields)-1]
+	}
+
+	return `{"name":"` + s.FullName() + `","type":"` + typ + `","fields":[` + fields + `]}`
+}
+
+// MarshalJSON marshals the schema to json.
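+//
+// A minimal sketch of building a record schema and marshaling it, assuming
+// default field options:
+//
+//	f, _ := NewField("a", NewPrimitiveSchema(Long, nil))
+//	rec, _ := NewRecordSchema("test", "org.hamba.avro", []*Field{f})
+//	b, _ := rec.MarshalJSON()
+//	// {"name":"org.hamba.avro.test","type":"record","fields":[{"name":"a","type":"long"}]}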
+func (s *RecordSchema) MarshalJSON() ([]byte, error) { + buf := new(bytes.Buffer) + buf.WriteString(`{"name":"` + s.full + `"`) + if len(s.aliases) > 0 { + aliasesJSON, err := jsoniter.Marshal(s.aliases) + if err != nil { + return nil, err + } + buf.WriteString(`,"aliases":`) + buf.Write(aliasesJSON) + } + if s.doc != "" { + docJSON, err := jsoniter.Marshal(s.doc) + if err != nil { + return nil, err + } + buf.WriteString(`,"doc":`) + buf.Write(docJSON) + } + if s.isError { + buf.WriteString(`,"type":"error"`) + } else { + buf.WriteString(`,"type":"record"`) + } + fieldsJSON, err := jsoniter.Marshal(s.fields) + if err != nil { + return nil, err + } + buf.WriteString(`,"fields":`) + buf.Write(fieldsJSON) + if err := s.marshalPropertiesToJSON(buf); err != nil { + return nil, err + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +// Fingerprint returns the SHA256 fingerprint of the schema. +func (s *RecordSchema) Fingerprint() [32]byte { + return s.fingerprinter.Fingerprint(s) +} + +// FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error. +func (s *RecordSchema) FingerprintUsing(typ FingerprintType) ([]byte, error) { + return s.fingerprinter.FingerprintUsing(typ, s) +} + +// CacheFingerprint returns unique identity of the schema. +func (s *RecordSchema) CacheFingerprint() [32]byte { + return s.cacheFingerprinter.CacheFingerprint(s, func() []byte { + var defs []any + for _, field := range s.fields { + if !field.HasDefault() { + continue + } + defs = append(defs, field.Default()) + } + b, _ := jsoniter.Marshal(defs) + return b + }) +} + +// Field is an Avro record type field. +type Field struct { + properties + + name string + aliases []string + doc string + typ Schema + hasDef bool + def any + order Order + + // action mainly used when decoding data that lack the field for schema evolution purposes. + action Action + // encodedDef mainly used when decoding data that lack the field for schema evolution purposes. + // Its value remains empty unless the field's encodeDefault function is called. + encodedDef atomic.Value +} + +type noDef struct{} + +// NoDefault is used when no default exists for a field. +var NoDefault = noDef{} + +// NewField creates a new field instance. +func NewField(name string, typ Schema, opts ...SchemaOption) (*Field, error) { + cfg := schemaConfig{def: NoDefault} + for _, opt := range opts { + opt(&cfg) + } + + if err := validateName(name); err != nil { + return nil, err + } + for _, a := range cfg.aliases { + if err := validateName(a); err != nil { + return nil, err + } + } + + switch cfg.order { + case "": + cfg.order = Asc + case Asc, Desc, Ignore: + default: + return nil, fmt.Errorf("avro: field %q order %q is invalid", name, cfg.order) + } + + f := &Field{ + properties: newProperties(cfg.props, fieldReserved), + name: name, + aliases: cfg.aliases, + doc: cfg.doc, + typ: typ, + order: cfg.order, + } + + if cfg.def != NoDefault { + def, err := validateDefault(name, typ, cfg.def) + if err != nil { + return nil, err + } + f.def = def + f.hasDef = true + } + + return f, nil +} + +// Name returns the name of a field. +func (f *Field) Name() string { + return f.name +} + +// Aliases return the field aliases. +func (f *Field) Aliases() []string { + return f.aliases +} + +// Type returns the schema of a field. +func (f *Field) Type() Schema { + return f.typ +} + +// HasDefault determines if the field has a default value. 
+func (f *Field) HasDefault() bool { + return f.hasDef +} + +// Default returns the default of a field or nil. +// +// The only time a nil default is valid is for a Null Type. +func (f *Field) Default() any { + if f.def == nullDefault { + return nil + } + + return f.def +} + +func (f *Field) encodeDefault(encode func(any) ([]byte, error)) ([]byte, error) { + if v := f.encodedDef.Load(); v != nil { + return v.([]byte), nil + } + if !f.hasDef { + return nil, fmt.Errorf("avro: '%s' field must have a non-empty default value", f.name) + } + if encode == nil { + return nil, fmt.Errorf("avro: failed to encode '%s' default value", f.name) + } + b, err := encode(f.Default()) + if err != nil { + return nil, err + } + f.encodedDef.Store(b) + + return b, nil +} + +// Doc returns the documentation of a field. +func (f *Field) Doc() string { + return f.doc +} + +// Order returns the field order. +func (f *Field) Order() Order { + return f.order +} + +// String returns the canonical form of a field. +func (f *Field) String() string { + return `{"name":"` + f.name + `","type":` + f.typ.String() + `}` +} + +// MarshalJSON marshals the schema to json. +func (f *Field) MarshalJSON() ([]byte, error) { + buf := new(bytes.Buffer) + buf.WriteString(`{"name":"` + f.name + `"`) + if len(f.aliases) > 0 { + aliasesJSON, err := jsoniter.Marshal(f.aliases) + if err != nil { + return nil, err + } + buf.WriteString(`,"aliases":`) + buf.Write(aliasesJSON) + } + if f.doc != "" { + docJSON, err := jsoniter.Marshal(f.doc) + if err != nil { + return nil, err + } + buf.WriteString(`,"doc":`) + buf.Write(docJSON) + } + typeJSON, err := jsoniter.Marshal(f.typ) + if err != nil { + return nil, err + } + buf.WriteString(`,"type":`) + buf.Write(typeJSON) + if f.hasDef { + defaultValueJSON, err := jsoniter.Marshal(f.Default()) + if err != nil { + return nil, err + } + buf.WriteString(`,"default":`) + buf.Write(defaultValueJSON) + } + if f.order != "" && f.order != Asc { + buf.WriteString(`,"order":"` + string(f.order) + `"`) + } + if err := f.marshalPropertiesToJSON(buf); err != nil { + return nil, err + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +// EnumSchema is an Avro enum type schema. +type EnumSchema struct { + name + properties + fingerprinter + cacheFingerprinter + + symbols []string + def string + doc string + + // encodedSymbols is the symbols of the encoded value. + // It's only used in the context of write-read schema resolution. + encodedSymbols []string +} + +// NewEnumSchema creates a new enum schema instance. 
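+//
+// A minimal sketch, assuming no default symbol is needed:
+//
+//	enum, err := NewEnumSchema("suit", "org.hamba.avro",
+//		[]string{"SPADES", "HEARTS", "DIAMONDS", "CLUBS"})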
+func NewEnumSchema(name, namespace string, symbols []string, opts ...SchemaOption) (*EnumSchema, error) { + var cfg schemaConfig + for _, opt := range opts { + opt(&cfg) + } + + n, err := newName(name, namespace, cfg.aliases) + if err != nil { + return nil, err + } + + if len(symbols) == 0 { + return nil, errors.New("avro: enum must have a non-empty array of symbols") + } + for _, sym := range symbols { + if err = validateName(sym); err != nil { + return nil, fmt.Errorf("avro: invalid symbol %q", sym) + } + } + + var def string + if d, ok := cfg.def.(string); ok && d != "" { + if !hasSymbol(symbols, d) { + return nil, fmt.Errorf("avro: symbol default %q must be a symbol", d) + } + def = d + } + + return &EnumSchema{ + name: n, + properties: newProperties(cfg.props, schemaReserved), + cacheFingerprinter: cacheFingerprinter{writerFingerprint: cfg.wfp}, + symbols: symbols, + def: def, + doc: cfg.doc, + }, nil +} + +func hasSymbol(symbols []string, sym string) bool { + for _, s := range symbols { + if s == sym { + return true + } + } + return false +} + +// Type returns the type of the schema. +func (s *EnumSchema) Type() Type { + return Enum +} + +// Doc returns the schema doc. +func (s *EnumSchema) Doc() string { + return s.doc +} + +// Symbols returns the symbols of an enum. +func (s *EnumSchema) Symbols() []string { + return s.symbols +} + +// Symbol returns the symbol for the given index. +// It might return the default value in the context of write-read schema resolution. +func (s *EnumSchema) Symbol(i int) (string, bool) { + resolv := len(s.encodedSymbols) > 0 + symbols := s.symbols + if resolv { + // A different set of symbols is encoded. + symbols = s.encodedSymbols + } + + if i < 0 || i >= len(symbols) { + return "", false + } + + symbol := symbols[i] + if resolv && !hasSymbol(s.symbols, symbol) { + if !s.HasDefault() { + return "", false + } + return s.Default(), true + } + return symbol, true +} + +// Default returns the default of an enum or an empty string. +func (s *EnumSchema) Default() string { + return s.def +} + +// HasDefault determines if the schema has a default value. +func (s *EnumSchema) HasDefault() bool { + return s.def != "" +} + +// String returns the canonical form of the schema. +func (s *EnumSchema) String() string { + symbols := "" + for _, sym := range s.symbols { + symbols += `"` + sym + `",` + } + if len(symbols) > 0 { + symbols = symbols[:len(symbols)-1] + } + + return `{"name":"` + s.FullName() + `","type":"enum","symbols":[` + symbols + `]}` +} + +// MarshalJSON marshals the schema to json. +func (s *EnumSchema) MarshalJSON() ([]byte, error) { + buf := new(bytes.Buffer) + buf.WriteString(`{"name":"` + s.full + `"`) + if len(s.aliases) > 0 { + aliasesJSON, err := jsoniter.Marshal(s.aliases) + if err != nil { + return nil, err + } + buf.WriteString(`,"aliases":`) + buf.Write(aliasesJSON) + } + if s.doc != "" { + docJSON, err := jsoniter.Marshal(s.doc) + if err != nil { + return nil, err + } + buf.WriteString(`,"doc":`) + buf.Write(docJSON) + } + buf.WriteString(`,"type":"enum"`) + symbolsJSON, err := jsoniter.Marshal(s.symbols) + if err != nil { + return nil, err + } + buf.WriteString(`,"symbols":`) + buf.Write(symbolsJSON) + if s.def != "" { + buf.WriteString(`,"default":"` + s.def + `"`) + } + if err := s.marshalPropertiesToJSON(buf); err != nil { + return nil, err + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +// Fingerprint returns the SHA256 fingerprint of the schema. 
+func (s *EnumSchema) Fingerprint() [32]byte { + return s.fingerprinter.Fingerprint(s) +} + +// FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error. +func (s *EnumSchema) FingerprintUsing(typ FingerprintType) ([]byte, error) { + return s.fingerprinter.FingerprintUsing(typ, s) +} + +// CacheFingerprint returns unique identity of the schema. +func (s *EnumSchema) CacheFingerprint() [32]byte { + return s.cacheFingerprinter.CacheFingerprint(s, func() []byte { + if !s.HasDefault() { + return []byte{} + } + return []byte(s.Default()) + }) +} + +// ArraySchema is an Avro array type schema. +type ArraySchema struct { + properties + fingerprinter + cacheFingerprinter + + items Schema +} + +// NewArraySchema creates an array schema instance. +func NewArraySchema(items Schema, opts ...SchemaOption) *ArraySchema { + var cfg schemaConfig + for _, opt := range opts { + opt(&cfg) + } + + return &ArraySchema{ + properties: newProperties(cfg.props, schemaReserved), + cacheFingerprinter: cacheFingerprinter{writerFingerprint: cfg.wfp}, + items: items, + } +} + +// Type returns the type of the schema. +func (s *ArraySchema) Type() Type { + return Array +} + +// Items returns the items schema of an array. +func (s *ArraySchema) Items() Schema { + return s.items +} + +// String returns the canonical form of the schema. +func (s *ArraySchema) String() string { + return `{"type":"array","items":` + s.items.String() + `}` +} + +// MarshalJSON marshals the schema to json. +func (s *ArraySchema) MarshalJSON() ([]byte, error) { + buf := new(bytes.Buffer) + buf.WriteString(`{"type":"array"`) + itemsJSON, err := jsoniter.Marshal(s.items) + if err != nil { + return nil, err + } + buf.WriteString(`,"items":`) + buf.Write(itemsJSON) + if err = s.marshalPropertiesToJSON(buf); err != nil { + return nil, err + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +// Fingerprint returns the SHA256 fingerprint of the schema. +func (s *ArraySchema) Fingerprint() [32]byte { + return s.fingerprinter.Fingerprint(s) +} + +// FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error. +func (s *ArraySchema) FingerprintUsing(typ FingerprintType) ([]byte, error) { + return s.fingerprinter.FingerprintUsing(typ, s) +} + +// CacheFingerprint returns unique identity of the schema. +func (s *ArraySchema) CacheFingerprint() [32]byte { + return s.cacheFingerprinter.CacheFingerprint(s, nil) +} + +// MapSchema is an Avro map type schema. +type MapSchema struct { + properties + fingerprinter + cacheFingerprinter + + values Schema +} + +// NewMapSchema creates a map schema instance. +func NewMapSchema(values Schema, opts ...SchemaOption) *MapSchema { + var cfg schemaConfig + for _, opt := range opts { + opt(&cfg) + } + + return &MapSchema{ + properties: newProperties(cfg.props, schemaReserved), + cacheFingerprinter: cacheFingerprinter{writerFingerprint: cfg.wfp}, + values: values, + } +} + +// Type returns the type of the schema. +func (s *MapSchema) Type() Type { + return Map +} + +// Values returns the values schema of a map. +func (s *MapSchema) Values() Schema { + return s.values +} + +// String returns the canonical form of the schema. +func (s *MapSchema) String() string { + return `{"type":"map","values":` + s.values.String() + `}` +} + +// MarshalJSON marshals the schema to json. 
+func (s *MapSchema) MarshalJSON() ([]byte, error) {
+	buf := new(bytes.Buffer)
+	buf.WriteString(`{"type":"map"`)
+	valuesJSON, err := jsoniter.Marshal(s.values)
+	if err != nil {
+		return nil, err
+	}
+	buf.WriteString(`,"values":`)
+	buf.Write(valuesJSON)
+	if err := s.marshalPropertiesToJSON(buf); err != nil {
+		return nil, err
+	}
+	buf.WriteString("}")
+	return buf.Bytes(), nil
+}
+
+// Fingerprint returns the SHA256 fingerprint of the schema.
+func (s *MapSchema) Fingerprint() [32]byte {
+	return s.fingerprinter.Fingerprint(s)
+}
+
+// FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error.
+func (s *MapSchema) FingerprintUsing(typ FingerprintType) ([]byte, error) {
+	return s.fingerprinter.FingerprintUsing(typ, s)
+}
+
+// CacheFingerprint returns unique identity of the schema.
+func (s *MapSchema) CacheFingerprint() [32]byte {
+	return s.cacheFingerprinter.CacheFingerprint(s, nil)
+}
+
+// UnionSchema is an Avro union type schema.
+type UnionSchema struct {
+	fingerprinter
+	cacheFingerprinter
+
+	types Schemas
+}
+
+// NewUnionSchema creates a union schema instance.
+func NewUnionSchema(types []Schema, opts ...SchemaOption) (*UnionSchema, error) {
+	var cfg schemaConfig
+	for _, opt := range opts {
+		opt(&cfg)
+	}
+
+	seen := map[string]bool{}
+	for _, schema := range types {
+		if schema.Type() == Union {
+			return nil, errors.New("avro: union type cannot be a union")
+		}
+
+		strType := schemaTypeName(schema)
+
+		if seen[strType] {
+			return nil, errors.New("avro: union type must be unique")
+		}
+		seen[strType] = true
+	}
+
+	return &UnionSchema{
+		cacheFingerprinter: cacheFingerprinter{writerFingerprint: cfg.wfp},
+		types:              types,
+	}, nil
+}
+
+// Type returns the type of the schema.
+func (s *UnionSchema) Type() Type {
+	return Union
+}
+
+// Types returns the types of a union.
+func (s *UnionSchema) Types() Schemas {
+	return s.types
+}
+
+// Nullable reports whether the union is nullable.
+func (s *UnionSchema) Nullable() bool {
+	if len(s.types) != 2 || s.types[0].Type() != Null && s.types[1].Type() != Null {
+		return false
+	}
+
+	return true
+}
+
+// Indices returns the index of the null and type schemas for a
+// nullable schema. For non-nullable schemas 0 is returned for
+// both.
+func (s *UnionSchema) Indices() (null, typ int) {
+	if !s.Nullable() {
+		return 0, 0
+	}
+	if s.types[0].Type() == Null {
+		return 0, 1
+	}
+	return 1, 0
+}
+
+// String returns the canonical form of the schema.
+func (s *UnionSchema) String() string {
+	types := ""
+	for _, typ := range s.types {
+		types += typ.String() + ","
+	}
+	if len(types) > 0 {
+		types = types[:len(types)-1]
+	}
+
+	return `[` + types + `]`
+}
+
+// MarshalJSON marshals the schema to json.
+func (s *UnionSchema) MarshalJSON() ([]byte, error) {
+	return jsoniter.Marshal(s.types)
+}
+
+// Fingerprint returns the SHA256 fingerprint of the schema.
+func (s *UnionSchema) Fingerprint() [32]byte {
+	return s.fingerprinter.Fingerprint(s)
+}
+
+// FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error.
+func (s *UnionSchema) FingerprintUsing(typ FingerprintType) ([]byte, error) {
+	return s.fingerprinter.FingerprintUsing(typ, s)
+}
+
+// CacheFingerprint returns unique identity of the schema.
+func (s *UnionSchema) CacheFingerprint() [32]byte {
+	return s.cacheFingerprinter.CacheFingerprint(s, nil)
+}
+
+// FixedSchema is an Avro fixed type schema.
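+//
+// A minimal sketch, assuming a 16-byte fixed with no logical type:
+//
+//	md5Type, _ := NewFixedSchema("md5", "org.hamba.avro", 16, nil)
+//	// md5Type.String() == {"name":"org.hamba.avro.md5","type":"fixed","size":16}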
+type FixedSchema struct { + name + properties + fingerprinter + cacheFingerprinter + + size int + logical LogicalSchema +} + +// NewFixedSchema creates a new fixed schema instance. +func NewFixedSchema( + name, namespace string, + size int, + logical LogicalSchema, + opts ...SchemaOption, +) (*FixedSchema, error) { + var cfg schemaConfig + for _, opt := range opts { + opt(&cfg) + } + + n, err := newName(name, namespace, cfg.aliases) + if err != nil { + return nil, err + } + + return &FixedSchema{ + name: n, + properties: newProperties(cfg.props, schemaReserved), + cacheFingerprinter: cacheFingerprinter{writerFingerprint: cfg.wfp}, + size: size, + logical: logical, + }, nil +} + +// Type returns the type of the schema. +func (s *FixedSchema) Type() Type { + return Fixed +} + +// Size returns the number of bytes of the fixed schema. +func (s *FixedSchema) Size() int { + return s.size +} + +// Logical returns the logical schema or nil. +func (s *FixedSchema) Logical() LogicalSchema { + return s.logical +} + +// String returns the canonical form of the schema. +func (s *FixedSchema) String() string { + size := strconv.Itoa(s.size) + + var logical string + if s.logical != nil { + logical = "," + s.logical.String() + } + + return `{"name":"` + s.FullName() + `","type":"fixed","size":` + size + logical + `}` +} + +// MarshalJSON marshals the schema to json. +func (s *FixedSchema) MarshalJSON() ([]byte, error) { + buf := new(bytes.Buffer) + buf.WriteString(`{"name":"` + s.full + `"`) + if len(s.aliases) > 0 { + aliasesJSON, err := jsoniter.Marshal(s.aliases) + if err != nil { + return nil, err + } + buf.WriteString(`,"aliases":`) + buf.Write(aliasesJSON) + } + buf.WriteString(`,"type":"fixed"`) + buf.WriteString(`,"size":` + strconv.Itoa(s.size)) + if s.logical != nil { + buf.WriteString(`,"logicalType":"` + string(s.logical.Type()) + `"`) + if d, ok := s.logical.(*DecimalLogicalSchema); ok { + buf.WriteString(`,"precision":` + strconv.Itoa(d.prec)) + if d.scale > 0 { + buf.WriteString(`,"scale":` + strconv.Itoa(d.scale)) + } + } + } + if err := s.marshalPropertiesToJSON(buf); err != nil { + return nil, err + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +// Fingerprint returns the SHA256 fingerprint of the schema. +func (s *FixedSchema) Fingerprint() [32]byte { + return s.fingerprinter.Fingerprint(s) +} + +// FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error. +func (s *FixedSchema) FingerprintUsing(typ FingerprintType) ([]byte, error) { + return s.fingerprinter.FingerprintUsing(typ, s) +} + +// CacheFingerprint returns unique identity of the schema. +func (s *FixedSchema) CacheFingerprint() [32]byte { + return s.cacheFingerprinter.CacheFingerprint(s, nil) +} + +// NullSchema is an Avro null type schema. +type NullSchema struct { + fingerprinter +} + +// Type returns the type of the schema. +func (s *NullSchema) Type() Type { + return Null +} + +// String returns the canonical form of the schema. +func (s *NullSchema) String() string { + return `"null"` +} + +// MarshalJSON marshals the schema to json. +func (s *NullSchema) MarshalJSON() ([]byte, error) { + return []byte(`"null"`), nil +} + +// Fingerprint returns the SHA256 fingerprint of the schema. +func (s *NullSchema) Fingerprint() [32]byte { + return s.fingerprinter.Fingerprint(s) +} + +// FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error. 
+func (s *NullSchema) FingerprintUsing(typ FingerprintType) ([]byte, error) { + return s.fingerprinter.FingerprintUsing(typ, s) +} + +// CacheFingerprint returns unique identity of the schema. +func (s *NullSchema) CacheFingerprint() [32]byte { + return s.Fingerprint() +} + +// RefSchema is a reference to a named Avro schema. +type RefSchema struct { + actual NamedSchema +} + +// NewRefSchema creates a ref schema instance. +func NewRefSchema(schema NamedSchema) *RefSchema { + return &RefSchema{ + actual: schema, + } +} + +// Type returns the type of the schema. +func (s *RefSchema) Type() Type { + return Ref +} + +// Schema returns the schema being referenced. +func (s *RefSchema) Schema() NamedSchema { + return s.actual +} + +// String returns the canonical form of the schema. +func (s *RefSchema) String() string { + return `"` + s.actual.FullName() + `"` +} + +// MarshalJSON marshals the schema to json. +func (s *RefSchema) MarshalJSON() ([]byte, error) { + return []byte(`"` + s.actual.FullName() + `"`), nil +} + +// Fingerprint returns the SHA256 fingerprint of the schema. +func (s *RefSchema) Fingerprint() [32]byte { + return s.actual.Fingerprint() +} + +// FingerprintUsing returns the fingerprint of the schema using the given algorithm or an error. +func (s *RefSchema) FingerprintUsing(typ FingerprintType) ([]byte, error) { + return s.actual.FingerprintUsing(typ) +} + +// CacheFingerprint returns unique identity of the schema. +func (s *RefSchema) CacheFingerprint() [32]byte { + return s.actual.CacheFingerprint() +} + +// PrimitiveLogicalSchema is a logical type with no properties. +type PrimitiveLogicalSchema struct { + typ LogicalType +} + +// NewPrimitiveLogicalSchema creates a new primitive logical schema instance. +func NewPrimitiveLogicalSchema(typ LogicalType) *PrimitiveLogicalSchema { + return &PrimitiveLogicalSchema{ + typ: typ, + } +} + +// Type returns the type of the logical schema. +func (s *PrimitiveLogicalSchema) Type() LogicalType { + return s.typ +} + +// String returns the canonical form of the logical schema. +func (s *PrimitiveLogicalSchema) String() string { + return `"logicalType":"` + string(s.typ) + `"` +} + +// DecimalLogicalSchema is a decimal logical type. +type DecimalLogicalSchema struct { + prec int + scale int +} + +// NewDecimalLogicalSchema creates a new decimal logical schema instance. +func NewDecimalLogicalSchema(prec, scale int) *DecimalLogicalSchema { + return &DecimalLogicalSchema{ + prec: prec, + scale: scale, + } +} + +// Type returns the type of the logical schema. +func (s *DecimalLogicalSchema) Type() LogicalType { + return Decimal +} + +// Precision returns the precision of the decimal logical schema. +func (s *DecimalLogicalSchema) Precision() int { + return s.prec +} + +// Scale returns the scale of the decimal logical schema. +func (s *DecimalLogicalSchema) Scale() int { + return s.scale +} + +// String returns the canonical form of the logical schema. 
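+//
+// For example, precision 4 and scale 2 render as
+// "logicalType":"decimal","precision":4,"scale":2; the scale clause is
+// omitted when the scale is zero.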
+func (s *DecimalLogicalSchema) String() string {
+	var scale string
+	if s.scale > 0 {
+		scale = `,"scale":` + strconv.Itoa(s.scale)
+	}
+	precision := strconv.Itoa(s.prec)
+
+	return `"logicalType":"` + string(Decimal) + `","precision":` + precision + scale
+}
+
+func invalidNameFirstChar(r rune) bool {
+	return (r < 'A' || r > 'Z') && (r < 'a' || r > 'z') && r != '_'
+}
+
+func invalidNameOtherChar(r rune) bool {
+	return invalidNameFirstChar(r) && (r < '0' || r > '9')
+}
+
+func validateName(name string) error {
+	if name == "" {
+		return errors.New("name must be non-empty")
+	}
+
+	if SkipNameValidation {
+		return nil
+	}
+
+	if strings.IndexFunc(name[:1], invalidNameFirstChar) > -1 {
+		return fmt.Errorf("invalid name %s", name)
+	}
+	if strings.IndexFunc(name[1:], invalidNameOtherChar) > -1 {
+		return fmt.Errorf("invalid name %s", name)
+	}
+
+	return nil
+}
+
+func validateDefault(name string, schema Schema, def any) (any, error) {
+	def, ok := isValidDefault(schema, def)
+	if !ok {
+		return nil, fmt.Errorf("avro: invalid default for field %s. %+v not a %s", name, def, schema.Type())
+	}
+	return def, nil
+}
+
+func isValidDefault(schema Schema, def any) (any, bool) {
+	switch schema.Type() {
+	case Ref:
+		ref := schema.(*RefSchema)
+		return isValidDefault(ref.Schema(), def)
+	case Null:
+		return nullDefault, def == nil
+	case Enum:
+		v, ok := def.(string)
+		if !ok || len(v) == 0 {
+			return def, false
+		}
+
+		var found bool
+		for _, sym := range schema.(*EnumSchema).symbols {
+			if def == sym {
+				found = true
+				break
+			}
+		}
+		return def, found
+	case String:
+		if _, ok := def.(string); ok {
+			return def, true
+		}
+	case Bytes, Fixed:
+		// Spec: Default values for bytes and fixed fields are JSON strings,
+		// where Unicode code points 0-255 are mapped to unsigned 8-bit byte values 0-255.
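+		// For example, the JSON default "\u0000\u00ff" maps to []byte{0x00, 0xff}.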
+		if d, ok := def.(string); ok {
+			if b, ok := isValidDefaultBytes(d); ok {
+				if schema.Type() == Fixed {
+					return byteSliceToArray(b, schema.(*FixedSchema).Size()), true
+				}
+				return b, true
+			}
+		}
+	case Boolean:
+		if _, ok := def.(bool); ok {
+			return def, true
+		}
+	case Int:
+		if i, ok := def.(int8); ok {
+			return int(i), true
+		}
+		if i, ok := def.(int16); ok {
+			return int(i), true
+		}
+		if i, ok := def.(int32); ok {
+			return int(i), true
+		}
+		if _, ok := def.(int); ok {
+			return def, true
+		}
+		if f, ok := def.(float64); ok {
+			return int(f), true
+		}
+	case Long:
+		if _, ok := def.(int64); ok {
+			return def, true
+		}
+		if f, ok := def.(float64); ok {
+			return int64(f), true
+		}
+	case Float:
+		if _, ok := def.(float32); ok {
+			return def, true
+		}
+		if f, ok := def.(float64); ok {
+			return float32(f), true
+		}
+	case Double:
+		if _, ok := def.(float64); ok {
+			return def, true
+		}
+	case Array:
+		arr, ok := def.([]any)
+		if !ok {
+			return nil, false
+		}
+
+		as := schema.(*ArraySchema)
+		for i, v := range arr {
+			v, ok := isValidDefault(as.Items(), v)
+			if !ok {
+				return nil, false
+			}
+			arr[i] = v
+		}
+		return arr, true
+	case Map:
+		m, ok := def.(map[string]any)
+		if !ok {
+			return nil, false
+		}
+
+		ms := schema.(*MapSchema)
+		for k, v := range m {
+			v, ok := isValidDefault(ms.Values(), v)
+			if !ok {
+				return nil, false
+			}
+
+			m[k] = v
+		}
+		return m, true
+	case Union:
+		unionSchema := schema.(*UnionSchema)
+		return isValidDefault(unionSchema.Types()[0], def)
+	case Record:
+		m, ok := def.(map[string]any)
+		if !ok {
+			return nil, false
+		}
+
+		for _, field := range schema.(*RecordSchema).Fields() {
+			fieldDef := field.Default()
+			if newDef, ok := m[field.Name()]; ok {
+				fieldDef = newDef
+			}
+
+			v, ok := isValidDefault(field.Type(), fieldDef)
+			if !ok {
+				return nil, false
+			}
+
+			m[field.Name()] = v
+		}
+		return m, true
+	}
+	return nil, false
+}
+
+func schemaTypeName(schema Schema) string {
+	if schema.Type() == Ref {
+		schema = schema.(*RefSchema).Schema()
+	}
+
+	if n, ok := schema.(NamedSchema); ok {
+		return n.FullName()
+	}
+
+	sname := string(schema.Type())
+	if lt := getLogicalType(schema); lt != "" {
+		sname += "." + string(lt)
+	}
+	return sname
+}
+
+func isValidDefaultBytes(def string) ([]byte, bool) {
+	runes := []rune(def)
+	l := len(runes)
+	b := make([]byte, l)
+	for i := 0; i < l; i++ {
+		if runes[i] < 0 || runes[i] > 255 {
+			return nil, false
+		}
+		b[i] = byte(runes[i])
+	}
+	return b, true
+}
diff --git a/vendor/github.com/hamba/avro/v2/schema_compatibility.go b/vendor/github.com/hamba/avro/v2/schema_compatibility.go
new file mode 100644
index 00000000..0b1d9ac3
--- /dev/null
+++ b/vendor/github.com/hamba/avro/v2/schema_compatibility.go
@@ -0,0 +1,487 @@
+package avro
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+)
+
+type recursionError struct{}
+
+func (e recursionError) Error() string {
+	return ""
+}
+
+type compatKey struct {
+	reader [32]byte
+	writer [32]byte
+}
+
+// SchemaCompatibility determines the compatibility of schemas.
+type SchemaCompatibility struct {
+	cache sync.Map // map[compatKey]error
+}
+
+// NewSchemaCompatibility creates a new schema compatibility instance.
+func NewSchemaCompatibility() *SchemaCompatibility {
+	return &SchemaCompatibility{}
+}
+
+// Compatible determines the compatibility of the reader and writer schemas.
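+//
+// A minimal sketch, assuming reader and writer are previously parsed schemas:
+//
+//	sc := NewSchemaCompatibility()
+//	if err := sc.Compatible(reader, writer); err != nil {
+//		// data written with writer cannot be decoded with reader
+//	}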
+func (c *SchemaCompatibility) Compatible(reader, writer Schema) error {
+	return c.compatible(reader, writer)
+}
+
+func (c *SchemaCompatibility) compatible(reader, writer Schema) error {
+	key := compatKey{reader: reader.Fingerprint(), writer: writer.Fingerprint()}
+	if err, ok := c.cache.Load(key); ok {
+		if _, ok := err.(recursionError); ok {
+			// Break the recursion here.
+			return nil
+		}
+
+		if err == nil {
+			return nil
+		}
+
+		return err.(error)
+	}
+
+	c.cache.Store(key, recursionError{})
+	err := c.match(reader, writer)
+	if err != nil {
+		// We don't want to pay the cost of fmt.Errorf every time.
+		err = errors.New(err.Error())
+	}
+	c.cache.Store(key, err)
+	return err
+}
+
+func (c *SchemaCompatibility) match(reader, writer Schema) error {
+	// If the schema is a reference, get the actual schema
+	if reader.Type() == Ref {
+		reader = reader.(*RefSchema).Schema()
+	}
+	if writer.Type() == Ref {
+		writer = writer.(*RefSchema).Schema()
+	}
+
+	if reader.Type() != writer.Type() {
+		if writer.Type() == Union {
+			// Reader must be compatible with all types in writer
+			for _, schema := range writer.(*UnionSchema).Types() {
+				if err := c.compatible(reader, schema); err != nil {
+					return err
+				}
+			}
+
+			return nil
+		}
+
+		if reader.Type() == Union {
+			// Writer must be compatible with at least one reader schema
+			var err error
+			for _, schema := range reader.(*UnionSchema).Types() {
+				err = c.compatible(schema, writer)
+				if err == nil {
+					return nil
+				}
+			}
+
+			return fmt.Errorf("reader union lacking writer schema %s", writer.Type())
+		}
+
+		switch writer.Type() {
+		case Int:
+			if reader.Type() == Long || reader.Type() == Float || reader.Type() == Double {
+				return nil
+			}
+
+		case Long:
+			if reader.Type() == Float || reader.Type() == Double {
+				return nil
+			}
+
+		case Float:
+			if reader.Type() == Double {
+				return nil
+			}
+
+		case String:
+			if reader.Type() == Bytes {
+				return nil
+			}
+
+		case Bytes:
+			if reader.Type() == String {
+				return nil
+			}
+		}
+
+		return fmt.Errorf("reader schema %s not compatible with writer schema %s", reader.Type(), writer.Type())
+	}
+
+	switch reader.Type() {
+	case Array:
+		return c.compatible(reader.(*ArraySchema).Items(), writer.(*ArraySchema).Items())
+
+	case Map:
+		return c.compatible(reader.(*MapSchema).Values(), writer.(*MapSchema).Values())
+
+	case Fixed:
+		r := reader.(*FixedSchema)
+		w := writer.(*FixedSchema)
+
+		if err := c.checkSchemaName(r, w); err != nil {
+			return err
+		}
+
+		if err := c.checkFixedSize(r, w); err != nil {
+			return err
+		}
+
+	case Enum:
+		r := reader.(*EnumSchema)
+		w := writer.(*EnumSchema)
+
+		if err := c.checkSchemaName(r, w); err != nil {
+			return err
+		}
+
+		if err := c.checkEnumSymbols(r, w); err != nil {
+			if r.HasDefault() {
+				return nil
+			}
+			return err
+		}
+
+	case Record:
+		r := reader.(*RecordSchema)
+		w := writer.(*RecordSchema)
+
+		if err := c.checkSchemaName(r, w); err != nil {
+			return err
+		}
+
+		if err := c.checkRecordFields(r, w); err != nil {
+			return err
+		}
+
+	case Union:
+		for _, schema := range writer.(*UnionSchema).Types() {
+			if err := c.compatible(reader, schema); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (c *SchemaCompatibility) checkSchemaName(reader, writer NamedSchema) error {
+	if reader.Name() != writer.Name() {
+		if c.contains(reader.Aliases(), writer.FullName()) {
+			return nil
+		}
+		return fmt.Errorf("reader schema %s and writer schema %s names do not match", reader.FullName(), writer.FullName())
+	}
+
+	return nil
+}
+
+func (c *SchemaCompatibility) checkFixedSize(reader, writer *FixedSchema) error {
+	if reader.Size() != writer.Size() {
+		return fmt.Errorf("%s reader and writer fixed sizes do not match", reader.FullName())
+	}
+
+	return nil
+}
+
+func (c *SchemaCompatibility) checkEnumSymbols(reader, writer *EnumSchema) error {
+	for _, symbol := range writer.Symbols() {
+		if !c.contains(reader.Symbols(), symbol) {
+			return fmt.Errorf("reader %s is missing symbol %s", reader.FullName(), symbol)
+		}
+	}
+
+	return nil
+}
+
+func (c *SchemaCompatibility) checkRecordFields(reader, writer *RecordSchema) error {
+	for _, field := range reader.Fields() {
+		f, ok := c.getField(writer.Fields(), field, func(gfo *getFieldOptions) {
+			gfo.fieldAlias = true
+		})
+		if !ok {
+			if field.HasDefault() {
+				continue
+			}
+
+			return fmt.Errorf("reader field %s is missing in writer schema and has no default", field.Name())
+		}
+
+		if err := c.compatible(field.Type(), f.Type()); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *SchemaCompatibility) contains(a []string, s string) bool {
+	for _, str := range a {
+		if str == s {
+			return true
+		}
+	}
+
+	return false
+}
+
+type getFieldOptions struct {
+	fieldAlias bool
+	elemAlias  bool
+}
+
+func (c *SchemaCompatibility) getField(a []*Field, f *Field, optFns ...func(*getFieldOptions)) (*Field, bool) {
+	opt := getFieldOptions{}
+	for _, fn := range optFns {
+		fn(&opt)
+	}
+	for _, field := range a {
+		if field.Name() == f.Name() {
+			return field, true
+		}
+		if opt.fieldAlias {
+			if c.contains(f.Aliases(), field.Name()) {
+				return field, true
+			}
+		}
+		if opt.elemAlias {
+			if c.contains(field.Aliases(), f.Name()) {
+				return field, true
+			}
+		}
+	}
+
+	return nil, false
+}
+
+// Resolve returns a composite schema that allows decoding data written by the writer schema,
+// and makes necessary adjustments to support the reader schema.
+//
+// It fails if the writer and reader schemas are not compatible.
+func (c *SchemaCompatibility) Resolve(reader, writer Schema) (Schema, error) {
+	if err := c.compatible(reader, writer); err != nil {
+		return nil, err
+	}
+
+	schema, _, err := c.resolve(reader, writer)
+	return schema, err
+}
+
+// resolve requires the reader's schema to be already compatible with the writer's.
+func (c *SchemaCompatibility) resolve(reader, writer Schema) (schema Schema, resolved bool, err error) {
+	if reader.Type() == Ref {
+		reader = reader.(*RefSchema).Schema()
+	}
+	if writer.Type() == Ref {
+		writer = writer.(*RefSchema).Schema()
+	}
+
+	if writer.Type() != reader.Type() {
+		if reader.Type() == Union {
+			for _, schema := range reader.(*UnionSchema).Types() {
+				// Compatibility is not guaranteed for every Union reader schema.
+				// Therefore, we need to check compatibility in every iteration.
+ if err := c.compatible(schema, writer); err != nil { + continue + } + sch, _, err := c.resolve(schema, writer) + if err != nil { + continue + } + return sch, true, nil + } + + return nil, false, fmt.Errorf("reader union lacking writer schema %s", writer.Type()) + } + + if writer.Type() == Union { + schemas := make([]Schema, 0) + for _, schema := range writer.(*UnionSchema).Types() { + sch, _, err := c.resolve(reader, schema) + if err != nil { + return nil, false, err + } + schemas = append(schemas, sch) + } + s, err := NewUnionSchema(schemas, withWriterFingerprint(writer.Fingerprint())) + return s, true, err + } + + if isPromotable(writer.Type(), reader.Type()) { + r := NewPrimitiveSchema(reader.Type(), reader.(*PrimitiveSchema).Logical(), + withWriterFingerprint(writer.Fingerprint()), + ) + r.encodedType = writer.Type() + return r, true, nil + } + + return nil, false, fmt.Errorf("failed to resolve composite schema for %s and %s", reader.Type(), writer.Type()) + } + + if isNative(writer.Type()) { + return reader, false, nil + } + + if writer.Type() == Enum { + r := reader.(*EnumSchema) + w := writer.(*EnumSchema) + if err = c.checkEnumSymbols(r, w); err != nil { + if r.HasDefault() { + enum, _ := NewEnumSchema(r.Name(), r.Namespace(), r.Symbols(), + WithAliases(r.Aliases()), + WithDefault(r.Default()), + withWriterFingerprint(w.Fingerprint()), + ) + enum.encodedSymbols = w.Symbols() + return enum, true, nil + } + + return nil, false, err + } + return reader, false, nil + } + + if writer.Type() == Fixed { + return reader, false, nil + } + + if writer.Type() == Union { + schemas := make([]Schema, 0) + for _, s := range writer.(*UnionSchema).Types() { + sch, resolv, err := c.resolve(reader, s) + if err != nil { + return nil, false, err + } + schemas = append(schemas, sch) + resolved = resolv || resolved + } + s, err := NewUnionSchema(schemas, withWriterFingerprintIfResolved(writer.Fingerprint(), resolved)) + if err != nil { + return nil, false, err + } + return s, resolved, nil + } + + if writer.Type() == Array { + schema, resolved, err = c.resolve(reader.(*ArraySchema).Items(), writer.(*ArraySchema).Items()) + if err != nil { + return nil, false, err + } + return NewArraySchema(schema, withWriterFingerprintIfResolved(writer.Fingerprint(), resolved)), resolved, nil + } + + if writer.Type() == Map { + schema, resolved, err = c.resolve(reader.(*MapSchema).Values(), writer.(*MapSchema).Values()) + if err != nil { + return nil, false, err + } + return NewMapSchema(schema, withWriterFingerprintIfResolved(writer.Fingerprint(), resolved)), resolved, nil + } + + if writer.Type() == Record { + return c.resolveRecord(reader, writer) + } + + return nil, false, fmt.Errorf("failed to resolve composite schema for %s and %s", reader.Type(), writer.Type()) +} + +func (c *SchemaCompatibility) resolveRecord(reader, writer Schema) (Schema, bool, error) { + w := writer.(*RecordSchema) + r := reader.(*RecordSchema) + + fields := make([]*Field, 0) + seen := make(map[string]struct{}) + + var resolved bool + for _, wf := range w.Fields() { + rf, ok := c.getField(r.Fields(), wf, func(gfo *getFieldOptions) { + gfo.elemAlias = true + }) + if !ok { + // The field was not found in the reader schema, it should be ignored. 
+ f, _ := NewField(wf.Name(), wf.Type(), WithAliases(wf.aliases), WithOrder(wf.order)) + f.def = wf.def + f.hasDef = wf.hasDef + f.action = FieldIgnore + fields = append(fields, f) + + resolved = true + continue + } + + ft, resolv, err := c.resolve(rf.Type(), wf.Type()) + if err != nil { + return nil, false, err + } + f, _ := NewField(rf.Name(), ft, WithAliases(rf.aliases), WithOrder(rf.order)) + f.def = rf.def + f.hasDef = rf.hasDef + fields = append(fields, f) + resolved = resolv || resolved + + seen[rf.Name()] = struct{}{} + } + + for _, rf := range r.Fields() { + if _, ok := seen[rf.Name()]; ok { + // This field has already been seen. + continue + } + + // The schemas are already known to be compatible, so there must be a default on + // the field in the writer. Use the default. + + f, _ := NewField(rf.Name(), rf.Type(), WithAliases(rf.aliases), WithOrder(rf.order)) + f.def = rf.def + f.hasDef = rf.hasDef + f.action = FieldSetDefault + fields = append(fields, f) + + resolved = true + } + + schema, err := NewRecordSchema(r.Name(), r.Namespace(), fields, + WithAliases(r.Aliases()), + withWriterFingerprintIfResolved(writer.Fingerprint(), resolved), + ) + return schema, resolved, err +} + +func isNative(typ Type) bool { + switch typ { + case Null, Boolean, Int, Long, Float, Double, Bytes, String: + return true + default: + return false + } +} + +func isPromotable(writerTyp, readerType Type) bool { + switch writerTyp { + case Int: + return readerType == Long || readerType == Float || readerType == Double + case Long: + return readerType == Float || readerType == Double + case Float: + return readerType == Double + case String: + return readerType == Bytes + case Bytes: + return readerType == String + default: + return false + } +} diff --git a/vendor/github.com/hamba/avro/v2/schema_parse.go b/vendor/github.com/hamba/avro/v2/schema_parse.go new file mode 100644 index 00000000..76e21e11 --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/schema_parse.go @@ -0,0 +1,592 @@ +package avro + +import ( + "errors" + "fmt" + "math" + "os" + "path/filepath" + "strings" + + jsoniter "github.com/json-iterator/go" + "github.com/mitchellh/mapstructure" +) + +// DefaultSchemaCache is the default cache for schemas. +var DefaultSchemaCache = &SchemaCache{} + +// SkipNameValidation sets whether to skip name validation. +// Avro spec incurs a strict naming convention for names and aliases, however official Avro tools do not follow that +// More info: +// https://lists.apache.org/thread/39v98os6wdpyr6w31xdkz0yzol51fsrr +// https://github.com/apache/avro/pull/1995 +var SkipNameValidation = false + +// Parse parses a schema string. +func Parse(schema string) (Schema, error) { + return ParseBytes([]byte(schema)) +} + +// ParseWithCache parses a schema string using the given namespace and schema cache. +func ParseWithCache(schema, namespace string, cache *SchemaCache) (Schema, error) { + return ParseBytesWithCache([]byte(schema), namespace, cache) +} + +// MustParse parses a schema string, panicking if there is an error. +func MustParse(schema string) Schema { + parsed, err := Parse(schema) + if err != nil { + panic(err) + } + + return parsed +} + +// ParseFiles parses the schemas in the files, in the order they appear, returning the last schema. +// +// This is useful when your schemas rely on other schemas. 
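+//
+// A minimal sketch, assuming hypothetical files where metric.avsc references
+// named types defined in base.avsc:
+//
+//	schema, err := ParseFiles("schemas/base.avsc", "schemas/metric.avsc")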
+func ParseFiles(paths ...string) (Schema, error) { + var schema Schema + for _, path := range paths { + s, err := os.ReadFile(filepath.Clean(path)) + if err != nil { + return nil, err + } + + schema, err = Parse(string(s)) + if err != nil { + return nil, err + } + } + + return schema, nil +} + +// ParseBytes parses a schema byte slice. +func ParseBytes(schema []byte) (Schema, error) { + return ParseBytesWithCache(schema, "", DefaultSchemaCache) +} + +// ParseBytesWithCache parses a schema byte slice using the given namespace and schema cache. +func ParseBytesWithCache(schema []byte, namespace string, cache *SchemaCache) (Schema, error) { + var json any + if err := jsoniter.Unmarshal(schema, &json); err != nil { + json = string(schema) + } + + seen := seenCache{} + s, err := parseType(namespace, json, seen, cache) + if err != nil { + return nil, err + } + return derefSchema(s), nil +} + +func parseType(namespace string, v any, seen seenCache, cache *SchemaCache) (Schema, error) { + switch val := v.(type) { + case nil: + return &NullSchema{}, nil + + case string: + return parsePrimitiveType(namespace, val, cache) + + case map[string]any: + return parseComplexType(namespace, val, seen, cache) + + case []any: + return parseUnion(namespace, val, seen, cache) + } + + return nil, fmt.Errorf("avro: unknown type: %v", v) +} + +func parsePrimitiveType(namespace, s string, cache *SchemaCache) (Schema, error) { + typ := Type(s) + switch typ { + case Null: + return &NullSchema{}, nil + + case String, Bytes, Int, Long, Float, Double, Boolean: + return parsePrimitive(typ, nil) + + default: + schema := cache.Get(fullName(namespace, s)) + if schema != nil { + return schema, nil + } + + return nil, fmt.Errorf("avro: unknown type: %s", s) + } +} + +func parseComplexType(namespace string, m map[string]any, seen seenCache, cache *SchemaCache) (Schema, error) { + if val, ok := m["type"].([]any); ok { + return parseUnion(namespace, val, seen, cache) + } + + str, ok := m["type"].(string) + if !ok { + return nil, fmt.Errorf("avro: unknown type: %+v", m) + } + typ := Type(str) + + switch typ { + case Null: + return &NullSchema{}, nil + + case String, Bytes, Int, Long, Float, Double, Boolean: + return parsePrimitive(typ, m) + + case Record, Error: + return parseRecord(typ, namespace, m, seen, cache) + + case Enum: + return parseEnum(namespace, m, seen, cache) + + case Array: + return parseArray(namespace, m, seen, cache) + + case Map: + return parseMap(namespace, m, seen, cache) + + case Fixed: + return parseFixed(namespace, m, seen, cache) + + default: + return parseType(namespace, string(typ), seen, cache) + } +} + +type primitiveSchema struct { + LogicalType string `mapstructure:"logicalType"` + Precision int `mapstructure:"precision"` + Scale int `mapstructure:"scale"` + Props map[string]any `mapstructure:",remain"` +} + +func parsePrimitive(typ Type, m map[string]any) (Schema, error) { + if m == nil { + return NewPrimitiveSchema(typ, nil), nil + } + + var ( + p primitiveSchema + meta mapstructure.Metadata + ) + if err := decodeMap(m, &p, &meta); err != nil { + return nil, fmt.Errorf("avro: error decoding primitive: %w", err) + } + + var logical LogicalSchema + if p.LogicalType != "" { + logical = parsePrimitiveLogicalType(typ, p.LogicalType, p.Precision, p.Scale) + } + + return NewPrimitiveSchema(typ, logical, WithProps(p.Props)), nil +} + +func parsePrimitiveLogicalType(typ Type, lt string, prec, scale int) LogicalSchema { + ltyp := LogicalType(lt) + if (typ == String && ltyp == UUID) || + (typ == Int && ltyp 
== Date) || + (typ == Int && ltyp == TimeMillis) || + (typ == Long && ltyp == TimeMicros) || + (typ == Long && ltyp == TimestampMillis) || + (typ == Long && ltyp == TimestampMicros) || + (typ == Long && ltyp == LocalTimestampMillis) || + (typ == Long && ltyp == LocalTimestampMicros) { + return NewPrimitiveLogicalSchema(ltyp) + } + + if typ == Bytes && ltyp == Decimal { + return parseDecimalLogicalType(-1, prec, scale) + } + + return nil +} + +type recordSchema struct { + Type string `mapstructure:"type"` + Name string `mapstructure:"name"` + Namespace string `mapstructure:"namespace"` + Aliases []string `mapstructure:"aliases"` + Doc string `mapstructure:"doc"` + Fields []map[string]any `mapstructure:"fields"` + Props map[string]any `mapstructure:",remain"` +} + +func parseRecord(typ Type, namespace string, m map[string]any, seen seenCache, cache *SchemaCache) (Schema, error) { + var ( + r recordSchema + meta mapstructure.Metadata + ) + if err := decodeMap(m, &r, &meta); err != nil { + return nil, fmt.Errorf("avro: error decoding record: %w", err) + } + + if err := checkParsedName(r.Name, r.Namespace, hasKey(meta.Keys, "namespace")); err != nil { + return nil, err + } + if r.Namespace == "" { + r.Namespace = namespace + } + + if !hasKey(meta.Keys, "fields") { + return nil, errors.New("avro: record must have an array of fields") + } + fields := make([]*Field, len(r.Fields)) + + var ( + rec *RecordSchema + err error + ) + switch typ { + case Record: + rec, err = NewRecordSchema(r.Name, r.Namespace, fields, + WithAliases(r.Aliases), WithDoc(r.Doc), WithProps(r.Props), + ) + case Error: + rec, err = NewErrorRecordSchema(r.Name, r.Namespace, fields, + WithAliases(r.Aliases), WithDoc(r.Doc), WithProps(r.Props), + ) + } + if err != nil { + return nil, err + } + + if err = seen.Add(rec.FullName()); err != nil { + return nil, err + } + + ref := NewRefSchema(rec) + cache.Add(rec.FullName(), ref) + for _, alias := range rec.Aliases() { + cache.Add(alias, ref) + } + + for i, f := range r.Fields { + field, err := parseField(rec.namespace, f, seen, cache) + if err != nil { + return nil, err + } + fields[i] = field + } + + return rec, nil +} + +type fieldSchema struct { + Name string `mapstructure:"name"` + Aliases []string `mapstructure:"aliases"` + Type any `mapstructure:"type"` + Doc string `mapstructure:"doc"` + Default any `mapstructure:"default"` + Order Order `mapstructure:"order"` + Props map[string]any `mapstructure:",remain"` +} + +func parseField(namespace string, m map[string]any, seen seenCache, cache *SchemaCache) (*Field, error) { + var ( + f fieldSchema + meta mapstructure.Metadata + ) + if err := decodeMap(m, &f, &meta); err != nil { + return nil, fmt.Errorf("avro: error decoding field: %w", err) + } + + if err := checkParsedName(f.Name, "", false); err != nil { + return nil, err + } + + if !hasKey(meta.Keys, "type") { + return nil, errors.New("avro: field requires a type") + } + typ, err := parseType(namespace, f.Type, seen, cache) + if err != nil { + return nil, err + } + + if !hasKey(meta.Keys, "default") { + f.Default = NoDefault + } + + field, err := NewField(f.Name, typ, + WithDefault(f.Default), WithAliases(f.Aliases), WithDoc(f.Doc), WithOrder(f.Order), WithProps(f.Props), + ) + if err != nil { + return nil, err + } + + return field, nil +} + +type enumSchema struct { + Name string `mapstructure:"name"` + Namespace string `mapstructure:"namespace"` + Aliases []string `mapstructure:"aliases"` + Type string `mapstructure:"type"` + Doc string `mapstructure:"doc"` + Symbols []string 
`mapstructure:"symbols"` + Default string `mapstructure:"default"` + Props map[string]any `mapstructure:",remain"` +} + +func parseEnum(namespace string, m map[string]any, seen seenCache, cache *SchemaCache) (Schema, error) { + var ( + e enumSchema + meta mapstructure.Metadata + ) + if err := decodeMap(m, &e, &meta); err != nil { + return nil, fmt.Errorf("avro: error decoding enum: %w", err) + } + + if err := checkParsedName(e.Name, e.Namespace, hasKey(meta.Keys, "namespace")); err != nil { + return nil, err + } + if e.Namespace == "" { + e.Namespace = namespace + } + + enum, err := NewEnumSchema(e.Name, e.Namespace, e.Symbols, + WithDefault(e.Default), WithAliases(e.Aliases), WithDoc(e.Doc), WithProps(e.Props), + ) + if err != nil { + return nil, err + } + + if err = seen.Add(enum.FullName()); err != nil { + return nil, err + } + + ref := NewRefSchema(enum) + cache.Add(enum.FullName(), ref) + for _, alias := range enum.Aliases() { + cache.Add(alias, enum) + } + + return enum, nil +} + +type arraySchema struct { + Items any `mapstructure:"items"` + Props map[string]any `mapstructure:",remain"` +} + +func parseArray(namespace string, m map[string]any, seen seenCache, cache *SchemaCache) (Schema, error) { + var ( + a arraySchema + meta mapstructure.Metadata + ) + if err := decodeMap(m, &a, &meta); err != nil { + return nil, fmt.Errorf("avro: error decoding array: %w", err) + } + + if !hasKey(meta.Keys, "items") { + return nil, errors.New("avro: array must have an items key") + } + schema, err := parseType(namespace, a.Items, seen, cache) + if err != nil { + return nil, err + } + + return NewArraySchema(schema, WithProps(a.Props)), nil +} + +type mapSchema struct { + Values any `mapstructure:"values"` + Props map[string]any `mapstructure:",remain"` +} + +func parseMap(namespace string, m map[string]any, seen seenCache, cache *SchemaCache) (Schema, error) { + var ( + ms mapSchema + meta mapstructure.Metadata + ) + if err := decodeMap(m, &ms, &meta); err != nil { + return nil, fmt.Errorf("avro: error decoding map: %w", err) + } + + if !hasKey(meta.Keys, "values") { + return nil, errors.New("avro: map must have an values key") + } + schema, err := parseType(namespace, ms.Values, seen, cache) + if err != nil { + return nil, err + } + + return NewMapSchema(schema, WithProps(ms.Props)), nil +} + +func parseUnion(namespace string, v []any, seen seenCache, cache *SchemaCache) (Schema, error) { + var err error + types := make([]Schema, len(v)) + for i := range v { + types[i], err = parseType(namespace, v[i], seen, cache) + if err != nil { + return nil, err + } + } + + return NewUnionSchema(types) +} + +type fixedSchema struct { + Name string `mapstructure:"name"` + Namespace string `mapstructure:"namespace"` + Aliases []string `mapstructure:"aliases"` + Type string `mapstructure:"type"` + Size int `mapstructure:"size"` + LogicalType string `mapstructure:"logicalType"` + Precision int `mapstructure:"precision"` + Scale int `mapstructure:"scale"` + Props map[string]any `mapstructure:",remain"` +} + +func parseFixed(namespace string, m map[string]any, seen seenCache, cache *SchemaCache) (Schema, error) { + var ( + f fixedSchema + meta mapstructure.Metadata + ) + if err := decodeMap(m, &f, &meta); err != nil { + return nil, fmt.Errorf("avro: error decoding fixed: %w", err) + } + + if err := checkParsedName(f.Name, f.Namespace, hasKey(meta.Keys, "namespace")); err != nil { + return nil, err + } + if f.Namespace == "" { + f.Namespace = namespace + } + + if !hasKey(meta.Keys, "size") { + return nil, 
errors.New("avro: fixed must have a size") + } + + var logical LogicalSchema + if f.LogicalType != "" { + logical = parseFixedLogicalType(f.Size, f.LogicalType, f.Precision, f.Scale) + } + + fixed, err := NewFixedSchema(f.Name, f.Namespace, f.Size, logical, WithAliases(f.Aliases), WithProps(f.Props)) + if err != nil { + return nil, err + } + + if err = seen.Add(fixed.FullName()); err != nil { + return nil, err + } + + ref := NewRefSchema(fixed) + cache.Add(fixed.FullName(), ref) + for _, alias := range fixed.Aliases() { + cache.Add(alias, fixed) + } + + return fixed, nil +} + +func parseFixedLogicalType(size int, lt string, prec, scale int) LogicalSchema { + ltyp := LogicalType(lt) + switch { + case ltyp == Duration && size == 12: + return NewPrimitiveLogicalSchema(Duration) + case ltyp == Decimal: + return parseDecimalLogicalType(size, prec, scale) + } + + return nil +} + +func parseDecimalLogicalType(size, prec, scale int) LogicalSchema { + if prec <= 0 { + return nil + } + + if size > 0 { + maxPrecision := int(math.Round(math.Floor(math.Log10(2) * (8*float64(size) - 1)))) + if prec > maxPrecision { + return nil + } + } + + if scale < 0 { + return nil + } + + // Scale may not be bigger than precision + if scale > prec { + return nil + } + + return NewDecimalLogicalSchema(prec, scale) +} + +func fullName(namespace, name string) string { + if len(namespace) == 0 || strings.ContainsRune(name, '.') { + return name + } + + return namespace + "." + name +} + +func checkParsedName(name, ns string, hasNS bool) error { + if name == "" { + return errors.New("avro: non-empty name key required") + } + if hasNS && ns == "" { + return errors.New("avro: namespace key must be non-empty or omitted") + } + return nil +} + +func hasKey(keys []string, k string) bool { + for _, key := range keys { + if key == k { + return true + } + } + return false +} + +func decodeMap(in, v any, meta *mapstructure.Metadata) error { + cfg := &mapstructure.DecoderConfig{ + ZeroFields: true, + Metadata: meta, + Result: v, + } + + decoder, _ := mapstructure.NewDecoder(cfg) + return decoder.Decode(in) +} + +func derefSchema(schema Schema) Schema { + seen := map[string]struct{}{} + + return walkSchema(schema, func(schema Schema) Schema { + if ns, ok := schema.(NamedSchema); ok { + seen[ns.FullName()] = struct{}{} + return schema + } + + ref, isRef := schema.(*RefSchema) + if !isRef { + return schema + } + + if _, haveSeen := seen[ref.Schema().FullName()]; !haveSeen { + seen[ref.Schema().FullName()] = struct{}{} + return ref.Schema() + } + return schema + }) +} + +type seenCache map[string]struct{} + +func (c seenCache) Add(name string) error { + if _, ok := c[name]; ok { + return fmt.Errorf("duplicate name %q", name) + } + c[name] = struct{}{} + return nil +} diff --git a/vendor/github.com/hamba/avro/v2/schema_walk.go b/vendor/github.com/hamba/avro/v2/schema_walk.go new file mode 100644 index 00000000..253740cc --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/schema_walk.go @@ -0,0 +1,21 @@ +package avro + +func walkSchema(schema Schema, fn func(Schema) Schema) Schema { + schema = fn(schema) + + switch s := schema.(type) { + case *RecordSchema: + for _, f := range s.Fields() { + f.typ = walkSchema(f.typ, fn) + } + case *ArraySchema: + s.items = walkSchema(s.items, fn) + case *MapSchema: + s.values = walkSchema(s.values, fn) + case *UnionSchema: + for i, st := range s.types { + s.types[i] = walkSchema(st, fn) + } + } + return schema +} diff --git a/vendor/github.com/hamba/avro/v2/types.go 
b/vendor/github.com/hamba/avro/v2/types.go new file mode 100644 index 00000000..931ea99e --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/types.go @@ -0,0 +1,9 @@ +package avro + +// LogicalDuration represents the `duration` logical type, as defined in +// https://avro.apache.org/docs/1.11.1/specification/#duration +type LogicalDuration struct { + Months uint32 + Days uint32 + Milliseconds uint32 +} diff --git a/vendor/github.com/hamba/avro/v2/writer.go b/vendor/github.com/hamba/avro/v2/writer.go new file mode 100644 index 00000000..a12d6d0d --- /dev/null +++ b/vendor/github.com/hamba/avro/v2/writer.go @@ -0,0 +1,194 @@ +package avro + +import ( + "encoding/binary" + "io" + "math" +) + +// WriterFunc is a function used to customize the Writer. +type WriterFunc func(w *Writer) + +// WithWriterConfig specifies the configuration to use with a writer. +func WithWriterConfig(cfg API) WriterFunc { + return func(w *Writer) { + w.cfg = cfg.(*frozenConfig) + } +} + +// Writer is an Avro specific io.Writer. +type Writer struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error +} + +// NewWriter creates a new Writer. +func NewWriter(out io.Writer, bufSize int, opts ...WriterFunc) *Writer { + writer := &Writer{ + cfg: DefaultConfig.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + } + + for _, opt := range opts { + opt(writer) + } + + return writer +} + +// Reset resets the Writer with a new io.Writer attached. +func (w *Writer) Reset(out io.Writer) { + w.out = out + w.buf = w.buf[:0] +} + +// Buffered returns the number of buffered bytes. +func (w *Writer) Buffered() int { + return len(w.buf) +} + +// Buffer gets the Writer buffer. +func (w *Writer) Buffer() []byte { + return w.buf +} + +// Flush writes any buffered data to the underlying io.Writer. +func (w *Writer) Flush() error { + if w.out == nil { + return nil + } + if w.Error != nil { + return w.Error + } + + n, err := w.out.Write(w.buf) + if n < len(w.buf) && err == nil { + err = io.ErrShortWrite + } + if err != nil { + if w.Error == nil { + w.Error = err + } + return err + } + + w.buf = w.buf[:0] + + return nil +} + +func (w *Writer) writeByte(b byte) { + w.buf = append(w.buf, b) +} + +// Write writes raw bytes to the Writer. +func (w *Writer) Write(b []byte) (int, error) { + w.buf = append(w.buf, b...) + return len(b), nil +} + +// WriteBool writes a Bool to the Writer. +func (w *Writer) WriteBool(b bool) { + if b { + w.writeByte(0x01) + return + } + w.writeByte(0x00) +} + +// WriteInt writes an Int to the Writer. +func (w *Writer) WriteInt(i int32) { + e := uint64((uint32(i) << 1) ^ uint32(i>>31)) + w.encodeInt(e) +} + +// WriteLong writes a Long to the Writer. +func (w *Writer) WriteLong(i int64) { + e := (uint64(i) << 1) ^ uint64(i>>63) + w.encodeInt(e) +} + +func (w *Writer) encodeInt(i uint64) { + if i == 0 { + w.writeByte(0) + return + } + + for i > 0 { + b := byte(i) & 0x7F + i >>= 7 + + if i != 0 { + b |= 0x80 + } + w.writeByte(b) + } +} + +// WriteFloat writes a Float to the Writer. +func (w *Writer) WriteFloat(f float32) { + b := make([]byte, 4) + binary.LittleEndian.PutUint32(b, math.Float32bits(f)) + + w.buf = append(w.buf, b...) +} + +// WriteDouble writes a Double to the Writer. +func (w *Writer) WriteDouble(f float64) { + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b, math.Float64bits(f)) + + w.buf = append(w.buf, b...) +} + +// WriteBytes writes Bytes to the Writer. +func (w *Writer) WriteBytes(b []byte) { + w.WriteLong(int64(len(b))) + w.buf = append(w.buf, b...) 
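+	// Net effect is the Avro "bytes" wire format: a zig-zag varint length
+	// (written by WriteLong above) followed by the raw bytes unchanged.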
+} + +// WriteString writes a String to the Writer. +func (w *Writer) WriteString(s string) { + w.WriteLong(int64(len(s))) + w.buf = append(w.buf, s...) +} + +// WriteBlockHeader writes a Block Header to the Writer. +func (w *Writer) WriteBlockHeader(l, s int64) { + if s > 0 && !w.cfg.config.DisableBlockSizeHeader { + w.WriteLong(-l) + w.WriteLong(s) + return + } + w.WriteLong(l) +} + +// WriteBlockCB writes a block using the callback. +func (w *Writer) WriteBlockCB(callback func(w *Writer) int64) int64 { + var dummyHeader [18]byte + headerStart := len(w.buf) + + // Write dummy header + _, _ = w.Write(dummyHeader[:]) + + // Write block data + capturedAt := len(w.buf) + length := callback(w) + size := int64(len(w.buf) - capturedAt) + + // Take a reference to the block data + captured := w.buf[capturedAt:len(w.buf)] + + // Rewrite the header + w.buf = w.buf[:headerStart] + w.WriteBlockHeader(length, size) + + // Copy the block data back to its position + w.buf = append(w.buf, captured...) + + return length +} diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 00000000..40243359 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 00000000..d31b3781 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 00000000..a2295380 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,123 @@ +# This is an example goreleaser.yaml file with some sane defaults.
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000..87d55747 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md new file mode 100644 index 00000000..684a3085 --- /dev/null +++ b/vendor/github.com/klauspost/compress/README.md @@ -0,0 +1,714 @@ +# compress + +This package provides various compression algorithms. + +* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go. +* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy. +* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). +* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams. +* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. +* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. +* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. 
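+
+As a quick illustration of the "drop-in replacement" claims above (a minimal
+sketch, assuming only the stdlib-compatible `NewWriter`/`NewReader` gzip API;
+not taken from this repository's docs), switching from the standard library is
+typically just an import swap:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	gzip "github.com/klauspost/compress/gzip" // drop-in for "compress/gzip"
+)
+
+func main() {
+	var buf bytes.Buffer
+
+	// Compress exactly as with the standard library.
+	zw := gzip.NewWriter(&buf)
+	_, _ = zw.Write([]byte("hello, compression"))
+	_ = zw.Close()
+
+	// Decompress it back.
+	zr, err := gzip.NewReader(&buf)
+	if err != nil {
+		panic(err)
+	}
+	defer zr.Close()
+	out, _ := io.ReadAll(zr)
+	fmt.Println(string(out)) // hello, compression
+}
+```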
+ +[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) +[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) +[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) + +# changelog + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close 
https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 + * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 + * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 + * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 + * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 +
+ +
+ See changes to v1.15.x + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + +* Sept 26, 2022 (v1.15.11) + + * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 + * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 + * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 + * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 + +* Sept 16, 2022 (v1.15.10) + + * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 + * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 + * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 + * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 + * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 + * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 + * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 + +* July 21, 2022 (v1.15.9) + + * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 + * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 + * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + +* July 13, 2022 (v1.15.8) + + * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 + * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 + * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 + * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 + * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 + * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 + * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 + +* June 29, 2022 (v1.15.7) + + * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 + * zip: Merge upstream https://github.com/klauspost/compress/pull/631 + * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 + * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 + * flate: Faster histograms https://github.com/klauspost/compress/pull/620 + * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 + +* June 3, 2022 (v1.15.6) + * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 + * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 + * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 + * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 + * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 + * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 + * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. + +* Mar 11, 2022 (v1.15.1) + * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) + * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) + * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) + * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) + * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) + +* Mar 3, 2022 (v1.15.0) + * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) + * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + +Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. + +Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. + +While the release has been extensively tested, it is recommended to testing when upgrading. + +
+ +
+See changes to v1.14.x
+
+* Feb 22, 2022 (v1.14.4)
+  * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+  * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+  * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
+  * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+  * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+  * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+  * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+  * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+  * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+  * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+  * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+  * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+  * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
+* Jan 11, 2022 (v1.14.1)
+  * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
+  * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
+  * zstd: Performance improvement in [#420](https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
+  * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
+  * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+See changes to v1.12.x
+
+* May 25, 2021 (v1.12.3)
+  * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
+  * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
+  * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
+
+* Apr 27, 2021 (v1.12.2)
+  * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
+  * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
+  * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
+  * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359)
+  * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
+  * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
+
+* Apr 14, 2021 (v1.12.1)
+  * snappy package removed. Upstream added as dependency.
+  * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
+  * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
+  * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
+  * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
+  * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
+  * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
+ +
+ See changes to v1.11.x + +* Mar 26, 2021 (v1.11.13) + * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) + * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) + * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) + * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) + +* Mar 5, 2021 (v1.11.12) + * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). + * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) + +* Mar 1, 2021 (v1.11.9) + * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) + * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) + * s2: Fix binaries. + +* Feb 25, 2021 (v1.11.8) + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. + * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) + * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) + * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) + * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) + +* Jan 14, 2021 (v1.11.7) + * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) + * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) + * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) + * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) + * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) + +* Jan 7, 2021 (v1.11.6) + * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) + * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) + +* Dec 20, 2020 (v1.11.4) + * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) + * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) + * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) + * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+See changes prior to v1.10.0
+
+* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
+* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
+* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
+* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
+* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
+* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
+* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
+* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
+* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
+* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
+* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
+* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
+* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
+* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
+* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
+* Nov 28, 2019 (v1.9.3) Fewer allocations in stateless deflate.
+* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
+* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
+* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
+* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
+* Nov 11, 2019: Reduce inflate memory use by 1KB.
+* Nov 10, 2019: Fewer allocations in deflate bit writer.
+* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
+* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
+* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
+* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
+* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
+
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples, this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base-64-encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1, 2, 3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one-byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+
+# deflate usage
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+
+| old import         | new import                              | Documentation                                                          |
+|--------------------|-----------------------------------------|------------------------------------------------------------------------|
+| `compress/gzip`    | `github.com/klauspost/compress/gzip`    | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)   |
+| `compress/zlib`    | `github.com/klauspost/compress/zlib`    | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)   |
+| `archive/zip`      | `github.com/klauspost/compress/zip`     | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)     |
+| `compress/flate`   | `github.com/klauspost/compress/flate`   | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
+
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip which supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so the standard documentation also applies: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly from CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer; the stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+To disable all assembly add `-tags=noasm`. This works across all packages.
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will perform compression, but without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because of this, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter).
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```go
+	// Replace 'io.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(io.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
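+
+For direct deflate use, the same batching idea applies. A minimal sketch using `flate.NewStatelessWriter` (the helper function name below is illustrative):
+
+```go
+package main
+
+import (
+	"bufio"
+	"io"
+
+	"github.com/klauspost/compress/flate"
+)
+
+// statelessCopy compresses src to dst using stateless deflate.
+// The bufio.Writer batches small writes, since every flushed chunk
+// is compressed as an independent block.
+func statelessCopy(dst io.Writer, src io.Reader) error {
+	fw := flate.NewStatelessWriter(dst)
+	bw := bufio.NewWriterSize(fw, 4096)
+	if _, err := io.Copy(bw, src); err != nil {
+		return err
+	}
+	if err := bw.Flush(); err != nil {
+		return err
+	}
+	return fw.Close()
+}
+```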
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
+
+# Performance Update 2018
+
+It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib; negative means the output was bigger. *Loss* - the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide a smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off, "9" is "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 show no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
+
+Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and lower speed than levels 6 and 7 of this package, respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real-life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, while rare characters like '¤' take more bits. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content.
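+
+As a short sketch, enabling this mode is just a level selection; `flate.HuffmanOnly` and `gzip.HuffmanOnly` are the exported level constants (the helper function name below is illustrative):
+
+```go
+package main
+
+import (
+	"io"
+
+	"github.com/klauspost/compress/gzip"
+)
+
+// huffmanOnly gzips src to dst with matching disabled,
+// trading compression ratio for near-constant throughput.
+func huffmanOnly(dst io.Writer, src io.Reader) error {
+	gw, err := gzip.NewWriterLevel(dst, gzip.HuffmanOnly)
+	if err != nil {
+		return err
+	}
+	if _, err := io.Copy(gw, src); err != nil {
+		gw.Close()
+		return err
+	}
+	return gw.Close()
+}
+```
+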
+For comparison, the "Twain" text compresses to *233460 bytes* (+29% vs. level 1) at an encode speed of 144MB/s (4.5x level 1). So in this case you trade a roughly 30% size increase for a 4x speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
+
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See the LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
new file mode 100644
index 00000000..ca6685e2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/SECURITY.md
@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory even with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity, this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 00000000..ea5a692d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+	if len(b) < 16 {
+		return 0
+	}
+
+	// Correctly predicted order 1
+	hits := 0
+	lastMatch := false
+	var o1 [256]byte
+	var hist [256]int
+	c1 := byte(0)
+	for _, c := range b {
+		if c == o1[c1] {
+			// We only count a hit if there were two correct predictions in a row.
+			if lastMatch {
+				hits++
+			}
+			lastMatch = true
+		} else {
+			lastMatch = false
+		}
+		o1[c1] = c
+		c1 = c
+		hist[c]++
+	}
+
+	// Use x^0.6 to give better spread
+	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+	// Calculate histogram distribution
+	variance := float64(0)
+	avg := float64(len(b)) / 256
+
+	for _, v := range hist {
+		Δ := float64(v) - avg
+		variance += Δ * Δ
+	}
+
+	stddev := math.Sqrt(float64(variance)) / float64(len(b))
+	exp := math.Sqrt(1 / float64(len(b)))
+
+	// Subtract expected stddev
+	stddev -= exp
+	if stddev < 0 {
+		stddev = 0
+	}
+	stddev *= 1 + exp
+
+	// Use x^0.4 to give better spread
+	entropy := math.Pow(stddev, 0.4)
+
+	// 50/50 weight between prediction and histogram distribution
+	return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the number of bits minimum required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy
+func ShannonEntropyBits(b []byte) int {
+	if len(b) == 0 {
+		return 0
+	}
+	var hist [256]int
+	for _, c := range b {
+		hist[c]++
+	}
+	shannon := float64(0)
+	invTotal := 1.0 / float64(len(b))
+	for _, v := range hist[:] {
+		if v > 0 {
+			n := float64(v)
+			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+		}
+	}
+	return int(math.Ceil(shannon))
+}
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 00000000..ea7324da
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides fast, near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+It does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low-level interface that allows compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and possibly an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                   |
+|---------------------|-------------------------------------------------------------------------------|
+| `nil`               | Everything ok, output is returned                                             |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                      |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated   |
+| `(error)`           | An internal error occurred.                                                   |
+
+As can be seen above, some errors will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+A lot of factors affect speed. Block sizes and compressibility of the material are primary factors.
+All compression functions currently run only on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64, for instance, it may be
+beneficial to transpose all your input values down by 64.
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented, likely compatible with the [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
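+
+Putting the above together, a minimal round-trip sketch (illustrative only; it uses two `Scratch` objects so the compressed output is not clobbered, per the re-use caveat above):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("entropy coding works well on skewed data. "), 100)
+
+	// Compress a single block. The returned slice is owned by cScratch.
+	var cScratch, dScratch fse.Scratch
+	comp, err := fse.Compress(in, &cScratch)
+	if err != nil {
+		// ErrIncompressible and ErrUseRLE are expected outcomes:
+		// store such blocks uncompressed (or as RLE) instead.
+		log.Fatalf("compress: %v", err)
+	}
+
+	// Decompress using exactly the bytes returned by Compress.
+	out, err := fse.Decompress(comp, &dScratch)
+	if err != nil {
+		log.Fatalf("decompress: %v", err)
+	}
+	if !bytes.Equal(in, out) {
+		log.Fatal("round trip mismatch")
+	}
+	fmt.Printf("%d -> %d bytes\n", len(in), len(comp))
+}
+```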
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go new file mode 100644 index 00000000..f65eb390 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitreader.go @@ -0,0 +1,122 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "encoding/binary" + "errors" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) uint16 { + if n == 0 || b.bitsRead >= 64 { + return 0 + } + return b.getBitsFast(n) +} + +// getBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) getBitsFast(n uint8) uint16 { + const regMask = 64 - 1 + v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. 
+ b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000..e82fa3bb --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,167 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000..abade2d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000..074018d8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + src = src[:ip] + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + default: + for ; len(src) >= 4; src = src[:len(src)-4] { + s.bw.flush32() + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + s.bw.close() + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
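+// The header begins with the table log, stored in 4 bits as
+// tableLog-minTablelog, followed by the variable-length encoded counts.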
+func (s *Scratch) writeCount() error {
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+	)
+	if cap(s.Out) < maxHeaderSize {
+		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
+	}
+	outP := uint(0)
+	out := s.Out[:maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return errors.New("internal error: remaining<1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += (bitCount + 7) / 8
+
+	if charnum > s.symbolLen {
+		return errors.New("internal error: charnum > s.symbolLen")
+	}
+	s.Out = out[:outP]
+	return nil
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaFindState int32
+	deltaNbBits    uint32
+}
+
+// String prints values as a human-readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
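+// It proceeds in three steps: cumulative symbol start positions, spreading
+// the symbols across the table, and building the state/symbol transform tables.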
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m, symlen := uint32(0), s.symbolLen + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + symlen = uint16(i) + 1 + } + s.symbolLen = symlen + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
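+// For example, with 1000 input bytes remaining (minBitsSrc = 10) and 20
+// symbols (minBitsSymbols = 6), the minimum table log is 6.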
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
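+// It assigns low-count symbols first and scales whatever probability mass
+// is left over the remaining symbols.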
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
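+// The absolute values of the normalized counts must sum to exactly
+// 1 << actualTableLog (a -1 "low probability" entry contributes 1).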
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	for i, v := range s.norm[s.symbolLen:] {
+		if v != 0 {
+			return fmt.Errorf("warning: Found symbol out of range, %d after cut", i)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 15
+)
+
+// Decompress a block of data.
+// You can provide a scratch buffer to avoid allocations.
+// If nil is provided a temporary one will be allocated.
+// It is possible, but by no way guaranteed that corrupt data will
+// return an error.
+// It is up to the caller to verify integrity of the returned data.
+// Use a predefined Scratch to set maximum acceptable output size.
+func Decompress(b []byte, s *Scratch) ([]byte, error) {
+	s, err := s.prepare(b)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+	err = s.readNCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.buildDtable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.decompress()
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Out, nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
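+// Existing slices are re-used when their capacity suffices, so repeated
+// decodes avoid fresh allocations.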
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + if err := br.init(s.br.unread()); err != nil { + return err + } + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
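+				// (off is a uint8 that advances by 4, so it wraps to 0 exactly
+				// when the 256-byte tmp buffer has been filled.)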
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000..535cbadf --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
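+	// Callers will typically store such a block uncompressed instead.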
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+)
+
+// Scratch provides temporary storage for compression and decompression.
+type Scratch struct {
+	// Private
+	count    [maxSymbolValue + 1]uint32
+	norm     [maxSymbolValue + 1]int16
+	br       byteReader
+	bits     bitReader
+	bw       bitWriter
+	ct       cTable      // Compression tables.
+	decTable []decSymbol // Decompression table.
+	maxCount int         // count of the most probable symbol
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// DecompressLimit limits the maximum decoded size acceptable.
+	// If > 0 decompression will stop when approximately this many bytes
+	// have been decoded.
+	// If 0, maximum size will be 2GB.
+	DecompressLimit int
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	actualTableLog uint8  // Selected tablelog.
+	zeroBits       bool   // true if a symbol may be encoded with zero bits (some symbol has prob > 50%).
+	clearCount     bool   // clear count
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	TableLog uint8
+}
+
+// Histogram allows the caller to populate the histogram and skip that step in the compression.
+// It also allows inspecting the histogram when compression is done.
+// To indicate that you have populated the histogram call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+// The returned slice will always be length 256.
+func (s *Scratch) Histogram() []uint32 {
+	return s.count[:]
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// prepare will prepare and allocate scratch tables used for both compression and decompression.
+func (s *Scratch) prepare(in []byte) (*Scratch, error) {
+	if s == nil {
+		s = &Scratch{}
+	}
+	if s.MaxSymbolValue == 0 {
+		s.MaxSymbolValue = 255
+	}
+	if s.TableLog == 0 {
+		s.TableLog = defaultTablelog
+	}
+	if s.TableLog > maxTableLog {
+		return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog)
+	}
+	if cap(s.Out) == 0 {
+		s.Out = make([]byte, 0, len(in))
+	}
+	if s.clearCount && s.maxCount == 0 {
+		for i := range s.count {
+			s.count[i] = 0
+		}
+		s.clearCount = false
+	}
+	s.br.init(in)
+	if s.DecompressLimit == 0 {
+		// Max size 2GB.
+		s.DecompressLimit = (2 << 30) - 1
+	}
+
+	return s, nil
+}
+
+// tableStep returns the next table index.
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 00000000..aff94220
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 00000000..b3d26295
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 00000000..8b6e5c66
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
+a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs
+(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low-level interface that allows compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
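+
+The following sketch is not part of the upstream documentation; it is a rough
+illustration of a single-block round trip using the functions described above,
+assuming the input fits within the block size limit:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("entropy coding example "), 200)
+
+	// Compress one block; the output starts with the table definition.
+	var enc huff0.Scratch
+	comp, _, err := huff0.Compress1X(in, &enc)
+	switch err {
+	case nil:
+	case huff0.ErrIncompressible, huff0.ErrUseRLE:
+		log.Fatalf("store the block uncompressed or as RLE instead: %v", err)
+	default:
+		log.Fatal(err)
+	}
+
+	// Decompress: read the table first, then decode the remaining data.
+	var dec huff0.Scratch
+	s, data, err := huff0.ReadTable(comp, &dec)
+	if err != nil {
+		log.Fatal(err)
+	}
+	out, err := s.Decompress1X(data)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("round trip ok:", bytes.Equal(in, out))
+}
+```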
+ +Be aware, that when re-using a `Scratch` object that the *output* buffer is also re-used, so if you are still using this +you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. + +The `Scratch` object will retain state that allows to re-use previous tables for encoding and decoding. + +## Tables and re-use + +Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results. + +The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy) +that controls this behaviour. See the documentation for details. This can be altered between each block. + +Do however note that this information is *not* stored in the output block and it is up to the users of the package to +record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called, +based on the boolean reported back from the CompressXX call. + +If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the +[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object. + +## Decompressing + +The first part of decoding is to initialize the decoding table through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable). +This will initialize the decoding tables. +You can supply the complete block to `ReadTable` and it will return the data part of the block +which can be given to the decompressor. + +Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X) +or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function. + +For concurrently decompressing content with a fixed table a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size. + +You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back +your input was likely corrupted. + +It is important to note that a successful decoding does *not* mean your output matches your original input. +There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. + +# Contributing + +Contributions are always welcome. Be aware that adding public functions will require good justification and breaking +changes will likely not be accepted. If in doubt open an issue before writing the PR. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go new file mode 100644 index 00000000..e36d9742 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -0,0 +1,229 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. 
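+// For example, a final byte of 0x20 makes init skip three bits (two zero
+// padding bits plus the end mark) before the first real field.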
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4 : b.off] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000..0ebc9aaa --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,102 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. 
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 00000000..84aa3d12 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,742 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. 
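+// All four blocks are encoded with a single shared table, so the main benefit
+// is that the streams can be decoded independently (see Decompress4X).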
+func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. 
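+	// The old prevTable's backing array is recycled as the next cTable,
+	// avoiding an allocation on the following block.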
+ s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src), nil +} + +func (s *Scratch) compress1xDo(dst, src []byte) []byte { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. + for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + bw.close() + return bw.out +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + idx := len(s.Out) + s.Out = s.compress1xDo(s.Out, toDo) + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. 
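+// The jump-table layout of the output matches compress4X; only the encoding
+// work itself is parallelized.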
+func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v == 0 { + continue + } + if v > m { + m = v + } + s.symbolLen = uint16(i) + 1 + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
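+// For example, a 1000-byte source (minBitsSrc = 10) with 20 distinct symbols
+// (minBitsSymbols = 6) yields a minimum table log of 6.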
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. + huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count() == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].setCount(1 << 30) + } + // fake entry, strong barrier + huffNode0[0].setCount(1 << 31) + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].setNbBits(0) + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits()]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol()].nBits = v.nbBits() + } + + // 
assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. + var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count() { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits() + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits() == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits() >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits() // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits() == maxNbBits { + n-- + } + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 +} + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000..0f56b02d --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, fmt.Errorf("fse decompress returned: %w", err) + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + 
single[i] = d + } + *rank += length + } + + return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 0 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
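+			// NOTE (editorial, not part of the upstream source): the two streams
+			// are decoded in lock step so that the table lookups and bit-buffer
+			// shifts of one stream can overlap the latency of the other; each
+			// unrolled round below emits one byte per stream.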
+			const stream = 0
+			const stream2 = 1
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
+
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
+		}
+
+		{
+			const stream = 2
+			const stream2 = 3
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
+
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)
+
+			v = single[uint8(br1.value>>shift)].entry
+			v2 = single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
+		}
+
+		off += 4
+
+		if off == 0 {
+			if bufoff > dstEvery {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 1")
+			}
+			// There must at least be 3 buffers left.
+			if len(out)-bufoff < dstEvery*3 {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 2")
+			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
+		}
+	}
+	if off > 0 {
+		ioff := int(off)
+		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
+			return nil, errors.New("corruption detected: stream overrun 3")
+		}
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
+		decoded += int(off) * 4
+		out = out[off:]
+	}
+
+	// Decode remaining.
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			if br.finished() {
+				d.bufs.Put(buf)
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[uint8(br.value>>shift)].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			d.bufs.Put(buf)
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 00000000..ba7e8e6b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
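As an editorial aside, here is a minimal round-trip sketch of the exported huff0 API whose hot paths the files above and below implement. The sample input, sizes, and error handling are illustrative assumptions, not part of the vendored code:

	package main

	import (
		"bytes"
		"fmt"
		"strings"

		"github.com/klauspost/compress/huff0"
	)

	func main() {
		in := []byte(strings.Repeat("prometheus metrics ", 64)) // assumed sample input

		// Compress1X emits the serialized Huffman table followed by a single stream.
		comp, _, err := huff0.Compress1X(in, nil)
		if err != nil {
			panic(err) // ErrIncompressible or ErrUseRLE are possible for other inputs
		}

		// ReadTable consumes the table and returns the remaining payload.
		s, payload, err := huff0.ReadTable(comp, nil)
		if err != nil {
			panic(err)
		}

		// Decoder() is stateless and safe for concurrent use; cap(dst) bounds the output.
		out, err := s.Decoder().Decompress1X(make([]byte, 0, len(in)), payload)
		if err != nil {
			panic(err)
		}
		fmt.Println(bytes.Equal(in, out)) // expected: true
	}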
+package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+			val := br.peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			return nil, err
+		}
+	}
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// decompress1x_main_loop_x86 is an x86 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_amd64(ctx *decompress1xContext)
+
+// decompress1x_main_loop_x86 is an x86 with BMI2 assembler implementation
+// of Decompress1X when tablelog > 8.
+//
+//go:noescape
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
+
+type decompress1xContext struct {
+	pbr      *bitReaderShifted
+	peekBits uint8
+	out      *byte
+	outCap   int
+	tbl      *dEntrySingle
+	decoded  int
+}
+
+// Error reported by asm implementations
+const error_max_decoded_size_exceeded = -1
+
+// Decompress1X will decompress a 1X encoded stream.
+// The cap of the output buffer will be the maximum decompressed size.
+// The length of the supplied input must match the end of a block exactly.
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
+	if len(d.dt.single) == 0 {
+		return nil, errors.New("no table loaded")
+	}
+	var br bitReaderShifted
+	err := br.init(src)
+	if err != nil {
+		return dst, err
+	}
+	maxDecodedSize := cap(dst)
+	dst = dst[:maxDecodedSize]
+
+	const tlSize = 1 << tableLogMax
+	const tlMask = tlSize - 1
+
+	if maxDecodedSize >= 4 {
+		ctx := decompress1xContext{
+			pbr:      &br,
+			out:      &dst[0],
+			outCap:   maxDecodedSize,
+			peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
+			tbl:      &d.dt.single[0],
+		}
+
+		if cpuinfo.HasBMI2() {
+			decompress1x_main_loop_bmi2(&ctx)
+		} else {
+			decompress1x_main_loop_amd64(&ctx)
+		}
+		if ctx.decoded == error_max_decoded_size_exceeded {
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+
+		dst = dst[:ctx.decoded]
+	}
+
+	// br < 8, so uint8 is fine
+	bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
+	for bitsLeft > 0 {
+		br.fill()
+		if len(dst) >= maxDecodedSize {
+			br.close()
+			return nil, ErrMaxDecodedSizeExceeded
+		}
+		v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
+		nBits := uint8(v.entry)
+		br.advance(nBits)
+		bitsLeft -= nBits
+		dst = append(dst, uint8(v.entry>>8))
+	}
+	return dst, br.close()
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 00000000..c4c7ab2d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,830 @@
+// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
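Both assembler entry points above exchange state with Go through a context struct and report progress back in ctx.decoded; from the caller's side the only contract is the destination capacity. A hypothetical helper (name and sizes assumed, not part of the vendored code) making that contract explicit:

	package example

	import "github.com/klauspost/compress/huff0"

	// decode4X assumes d already holds a table loaded via huff0.ReadTable and
	// that the uncompressed size of the 4X stream is known to the caller.
	func decode4X(d *huff0.Decoder, src []byte, uncompressedSize int) ([]byte, error) {
		// cap(dst) tells Decompress4X exactly how many bytes to produce;
		// a mismatch is reported as a corruption error.
		dst := make([]byte, 0, uncompressedSize)
		return d.Decompress4X(dst, src)
	}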
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ (R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 + + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 48(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 + + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 96(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 + + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (BX)(R8*2) + + // update the bitreader structure + 
MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), AX + SUBQ $0x20, R12 + SUBQ $0x04, AX + MOVQ 144(R10), R13 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 + + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R11, R13 + SHRQ CL, R13 + + // v1 := table[val1&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 + + // Main loop +main_loop: + XORL DX, DX + CMPQ BX, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill0 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 + + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX) + + // update the bitreader structure + MOVQ R11, 32(R10) + MOVB R12, 40(R10) + + // br1.fillFast32() + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill1 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 + 
+ // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*1) + + // update the bitreader structure + MOVQ R11, 80(R10) + MOVB R12, 88(R10) + + // br2.fillFast32() + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill2 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 + + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (BX)(R8*2) + + // update the bitreader structure + MOVQ R11, 128(R10) + MOVB R12, 136(R10) + + // br3.fillFast32() + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 + JBE skip_fill3 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 + + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v0 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + 
SHLQ CL, R11 + ADDB CL, R12 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v1 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v2 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R11 + ADDB CL, R12 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R11, R13 + MOVQ DI, CX + SHRQ CL, R13 + + // v3 := table[val0&mask] + MOVW (R9)(R13*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R11 + ADDB CL, R12 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) + + // update the bitreader structure + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exceeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), 
CX + CMPQ CX, BX + JGE error_max_decoded_size_exceeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exceeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 00000000..908c17de --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 00000000..77ecd68e --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
+package huff0 + +import ( + "errors" + "fmt" + "math" + "math/bits" + "sync" + + "github.com/klauspost/compress/fse" +) + +const ( + maxSymbolValue = 255 + + // zstandard limits tablelog to 11, see: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description + tableLogMax = 11 + tableLogDefault = 11 + minTablelog = 5 + huffNodesLen = 512 + + // BlockSizeMax is the maximum input size for a single block, uncompressed. + BlockSizeMax = 1<<18 - 1 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. + ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") + + // ErrTooBig is returned if input is too large for a single block. + ErrTooBig = errors.New("input too big") + + // ErrMaxDecodedSizeExceeded is returned if the maximum output size is exceeded. + ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") +) + +type ReusePolicy uint8 + +const ( + // ReusePolicyAllow will allow reuse if it produces smaller output. + ReusePolicyAllow ReusePolicy = iota + + // ReusePolicyPrefer will re-use aggressively if possible. + // This will not check if a new table will produce smaller output, + // except if the current table is impossible to use or + // compressed output is bigger than input. + ReusePolicyPrefer + + // ReusePolicyNone will disable re-use of tables. + // This is slightly faster than ReusePolicyAllow but may produce larger output. + ReusePolicyNone + + // ReusePolicyMust must allow reuse and produce smaller output. + ReusePolicyMust +) + +type Scratch struct { + count [maxSymbolValue + 1]uint32 + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is the output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for the next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // OutTable will contain the table data only, if a new table has been generated. + // Slice of the returned data. + OutTable []byte + + // OutData will contain the compressed data. + // Slice of the returned data. + OutData []byte + + // MaxDecodedSize will set the maximum allowed output size. + // This value will automatically be set to BlockSizeMax if not set. + // Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded. + MaxDecodedSize int + + srcLen int + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + // Must be <= 11 and >= 5. + TableLog uint8 + + // Reuse will specify the reuse policy. + Reuse ReusePolicy + + // WantLogLess allows specifying a log2 reduction that should at least be achieved, + // otherwise the block will be returned as incompressible. + // The reduction should then at least be (input size >> WantLogLess). + // If WantLogLess == 0 any improvement will do. + WantLogLess uint8 + + symbolLen uint16 // Length of active part of the symbol table. + maxCount int // count of the most probable symbol + clearCount bool // clear count + actualTableLog uint8 // Selected tablelog.
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.srcLen = len(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 00000000..3954c512 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 00000000..e802579c --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 00000000..4465fbe9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
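Tying together the `cpuinfo` package introduced above: a minimal sketch of how the BMI2 gate and its `DisableBMI2` test hook are typically exercised. Only `HasBMI2` and `DisableBMI2` are taken from the diff; the test name and its placement in a `huff0` test file are assumptions, and because the package lives under `internal/` this only compiles from within the `github.com/klauspost/compress` module itself:

```Go
package huff0

import (
	"testing"

	"github.com/klauspost/compress/internal/cpuinfo"
)

// TestGenericFallback forces the generic (non-BMI2) code path so it
// gets exercised even on CPUs that do support BMI2.
func TestGenericFallback(t *testing.T) {
	restore := cpuinfo.DisableBMI2() // flip the gate off
	defer restore()                  // restore the previous state

	if cpuinfo.HasBMI2() {
		t.Fatal("expected BMI2 to report as disabled")
	}
	// Decompression calls made here would now take the generic path.
}
```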
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 00000000..40796a49 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 00000000..77395a6b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. 
Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 00000000..13c6040a --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. 
This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. 
+func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
+func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go new file mode 100644 index 00000000..2754bac6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -0,0 +1,250 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// EncodeBlockInto exposes encodeBlock but checks dst size. 
+func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
+ d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 00000000..34d01f4a --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. 
+ +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 00000000..5a4412f9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.19 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 00000000..92e2347b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-offs, while being backed by a very fast decoder. +A high performance compression algorithm is implemented, currently focused on speed.
+ +This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. + +This package is pure Go and without use of "unsafe". + +The `zstd` package is provided as open source software using a Go standard license. + +Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors. + +For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go). + +## Installation + +Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`. + +[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd) + +## Compressor + +### Status: + +STABLE - there may always be subtle bugs; a wide variety of content has been tested and the library is actively +used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates. + +There may still be specific combinations of data types/size/settings that could lead to edge cases, +so as always, testing is recommended. + +For now, a high speed (fastest) and medium-fast (default) compressor has been implemented. + +* The "Fastest" compression ratio is roughly equivalent to zstd level 1. +* The "Default" compression ratio is roughly equivalent to zstd level 3 (default). +* The "Better" compression ratio is roughly equivalent to zstd level 7. +* The "Best" compression ratio is roughly equivalent to zstd level 11. + +In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode. +Its compression ratio is comparable to stdlib gzip at around level 3, but it is usually 3x as fast. + + +### Usage + +An Encoder can be used for either compressing a stream via the +`io.WriteCloser` interface supported by the Encoder or as multiple independent +tasks via the `EncodeAll` function. +For smaller encodes, using the `EncodeAll` function is encouraged. +Use `NewWriter` to create a new instance that can be used for both. + +To create a writer with default options, do the following: + +```Go +// Compress input to output. +func Compress(in io.Reader, out io.Writer) error { + enc, err := zstd.NewWriter(out) + if err != nil { + return err + } + _, err = io.Copy(enc, in) + if err != nil { + enc.Close() + return err + } + return enc.Close() +} +``` + +Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called. +Even if your encode fails, you should still call `Close()` to release any resources that may be held up. + +The above is fine for big encodes. However, whenever possible try to *reuse* the writer. + +To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output. +This will allow the encoder to reuse all resources and avoid wasteful allocations. + +Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part +of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change +in the future. So if you want to limit concurrency for future updates, specify the concurrency +you would like. + +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + +You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined +compression settings can be specified.
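For instance, a minimal sketch in the same shape as the `Compress` example above, but selecting the strongest of the pre-defined levels (`SpeedFastest`, `SpeedDefault`, `SpeedBetterCompression` and `SpeedBestCompression` are the pre-defined levels; the name `CompressBest` is illustrative):

```Go
// Compress input to output, trading encoding speed for ratio.
func CompressBest(in io.Reader, out io.Writer) error {
	enc, err := zstd.NewWriter(out, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
	if err != nil {
		return err
	}
	if _, err = io.Copy(enc, in); err != nil {
		enc.Close()
		return err
	}
	return enc.Close()
}
```

As with the default-options example, call `Close()` even on failure so resources are released.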
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently.
+Each call will run on the same goroutine as the caller.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks, you should take care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some speed examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", level 4 is "best".
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file out level insize outsize millis mb/s
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
+
+github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
+github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
+github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
+github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
+
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file out level insize outsize millis mb/s
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
+
+rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
+rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
+rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
+rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
+
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file out level insize outsize millis mb/s
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
+
+nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
+nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
+nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
+nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
+
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
+```
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
+
+### Usage
+
+The package has been designed for two main usages: big streams of data and smaller in-memory buffers.
+Both are accessed by creating a `Decoder`.
+
+For streaming use, a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+	d, err := zstd.NewReader(in)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+
+	// Copy content...
+	_, err = io.Copy(out, d)
+	return err
+}
+```
+
+When running with default settings, it is important to call the "Close" function when you no longer need
+the Reader, so that its goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will only decompress data
+as it is being requested.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+A dictionary will be used automatically for data that specifies it.
+A re-used Decoder will still contain the registered dictionaries.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used,
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time. Just be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use `Reset(r io.Reader) error` to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers, a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made.
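+
+For example (a sketch reusing the package-level `decoder` from above; `expectedSize` is a
+caller-supplied estimate, not something the package provides):
+
+```Go
+// Decode into preallocated capacity to avoid growth allocations.
+out, err := decoder.DecodeAll(src, make([]byte, 0, expectedSize))
+```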
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder will create goroutines that:
+
+1) Read input and split it into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
+
+So effectively this also means the decoder will "read ahead" and prepare data so it is always available for output.
+
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
+Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
+### Benchmarks
+
+The first two are streaming decodes and the last are smaller inputs.
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
+```
+BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
+BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
+```
+
+This reflects the performance around May 2022, but may be out of date.
+
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported, it can be useful for internal files.
+
+To support the compression and decompression of these files, you must register a compressor and decompressor.
+
+It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT to
+use the global registration functions. The main reason for this is that two registrations from
+different packages will result in a panic.
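+
+A minimal sketch of per-archive registration, using this package's `ZipCompressor` helper and the
+WinZip method ID it exports:
+
+```Go
+import (
+	"archive/zip"
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// A single factory can safely serve many archives.
+var zstdComp = zstd.ZipCompressor()
+
+// newZipWriter registers zstd on one writer instead of globally.
+func newZipWriter(w io.Writer) *zip.Writer {
+	zw := zip.NewWriter(w)
+	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstdComp)
+	return zw
+}
+```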
+ +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 00000000..25ca9839 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,136 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + v := b.in[len(b.in)-8:] + b.in = b.in[:len(b.in)-8] + b.value = binary.LittleEndian.Uint64(v) + b.bitsRead = 0 +} + +// fill() will make sure at least 32 bits are available. 
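+// Unlike fillFast, it also handles the tail of the stream, reading byte-by-byte when fewer than 4 bytes remain.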
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if len(b.in) >= 4 { + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + return + } + + b.bitsRead -= uint8(8 * len(b.in)) + for len(b.in) > 0 { + b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) + b.in = b.in[:len(b.in)-1] + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return len(b.in) == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 00000000..1952f175 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,112 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. 
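+// Values wider than 31 bits are written as two 32-bit halves with a flush32 in between.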
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. +// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 00000000..9c28840c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,731 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. 
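+	// A 30-bit offset also keeps the largest possible offset safely within int32 range.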
+ maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. + WindowSize uint64 + + err error + + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool + + // Frame to use for singlethreaded decoding. + // Should not be used by the decoder itself since parent may be another frame. + localFrame *frameDec + + sequence []seqVals + + async struct { + newHist *history + literals []byte + seqData []byte + seqSize int // Size of uncompressed sequences + fcs uint64 + } + + // Block is RLE, this is the size. + RLESize uint32 + + Type blockType + + // Is this the last block of a frame? + Last bool + + // Use less memory + lowMem bool +} + +func (b *blockDec) String() string { + if b == nil { + return "" + } + return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) +} + +func newBlockDec(lowMem bool) *blockDec { + b := blockDec{ + lowMem: lowMem, + } + return &b +} + +// reset will reset the block. +// Input must be a start of a block and will be at the end of the block when returned. +func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { + b.WindowSize = windowSize + tmp, err := br.readSmall(3) + if err != nil { + println("Reading block header:", err) + return err + } + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + b.Last = bh&1 != 0 + b.Type = blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + maxSize := maxCompressedBlockSizeAlloc + switch b.Type { + case blockTypeReserved: + return ErrReservedBlockType + case blockTypeRLE: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + b.RLESize = uint32(cSize) + if b.lowMem { + maxSize = cSize + } + cSize = 1 + case blockTypeCompressed: + if debugDecoder { + println("Data size on stream:", cSize) + } + b.RLESize = 0 + maxSize = maxCompressedBlockSizeAlloc + if windowSize < maxCompressedBlockSize && b.lowMem { + maxSize = int(windowSize) + compressedBlockOverAlloc + } + if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { + if debugDecoder { + printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrCompressedSizeTooBig + } + // Empty compressed blocks must at least be 2 bytes + // for Literals_Block_Type and one for Sequences_Section_Header. + if cSize < 2 { + return ErrBlockTooSmall + } + case blockTypeRaw: + if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { + if debugDecoder { + printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) + } + return ErrWindowSizeExceeded + } + + b.RLESize = 0 + // We do not need a destination for raw blocks. + maxSize = -1 + default: + panic("Invalid block type") + } + + // Read block data. + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. 
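+		// In lowMem mode allocate only what this block needs; otherwise keep a reusable maximum-size buffer.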
+ if b.lowMem || cSize > maxCompressedBlockSize { + b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) + } else { + b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) + } + } + b.data, err = br.readBig(cSize, b.dataStorage) + if err != nil { + if debugDecoder { + println("Reading block:", err, "(", cSize, ")", len(b.data)) + printf("%T", br) + } + return err + } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } + return nil +} + +// sendEOF will make the decoder send EOF on this frame. +func (b *blockDec) sendErr(err error) { + b.Last = true + b.Type = blockTypeReserved + b.err = err +} + +// Close will release resources. +// Closed blockDec cannot be reset. +func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxCompressedBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
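+			// The header is 3 bytes; Size_Format 1 selects four literal streams.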
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
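+			// Capping MaxDecodedSize at litRegenSize bounds the output for corrupt input.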
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
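+		// Literal-only block: there are no sequences to execute.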
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to 0x%x, code: %v", symb, v) + } + case compModeFSE: + if debugDecoder { + println("Reading table for", tableIndex(i)) + } + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil 
+ } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + } + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 00000000..32a7f401 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,909 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. 
+ +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. +func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. 
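+// The low bits hold the literals block type and sizes; the top 4 bits hold the number of header bytes to emit.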
+type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. +func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. 
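+// Repeat offsets are encoded as the values 1-3; all other offsets are stored with a +3 bias.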
+func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. + if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 16 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw. 
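+		// Include the literals header size before declaring a win over raw.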
+ var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... + // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 6) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. 
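+	// The offset lets the header be revisited once the final compressed size is known.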
+ bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 16 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... + if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). 
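+ // All sizes here are in bits; the penalty below works out to
+ // nSize/16 (~6%) plus 16 bits (2 bytes).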
+ nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... 
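+ // The zstd format requires sequences to be emitted last-to-first,
+ // since the decoder consumes the FSE bitstream from the end.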
+ for seq >= 0 {
+ s = b.sequences[seq]
+
+ ofB := ofTT[s.ofCode]
+ wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+ //of.encode(ofB)
+ nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+ dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+ wr.addBits16NC(of.state, uint8(nbBitsOut))
+ of.state = of.stateTable[dstState]
+
+ // Accumulate extra bits.
+ outBits := ofB.outBits & 31
+ extraBits := uint64(s.offset & bitMask32[outBits])
+ extraBitsN := outBits
+
+ mlB := mlTT[s.mlCode]
+ //ml.encode(mlB)
+ nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+ dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+ wr.addBits16NC(ml.state, uint8(nbBitsOut))
+ ml.state = ml.stateTable[dstState]
+
+ outBits = mlB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ llB := llTT[s.llCode]
+ //ll.encode(llB)
+ nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+ dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+ wr.addBits16NC(ll.state, uint8(nbBitsOut))
+ ll.state = ll.stateTable[dstState]
+
+ outBits = llB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ wr.flush32()
+ wr.addBits64NC(extraBits, extraBitsN)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ seq--
+ }
+ ml.flush(mlEnc.actualTableLog)
+ of.flush(ofEnc.actualTableLog)
+ ll.flush(llEnc.actualTableLog)
+ wr.close()
+ b.output = wr.out
+
+ // Maybe even add a bigger margin.
+ if len(b.output)-3-bhOffset >= b.size {
+ // Discard and encode as raw block.
+ b.output = b.encodeRawTo(b.output[:bhOffset], org)
+ b.popOffsets()
+ b.litEnc.Reuse = huff0.ReusePolicyNone
+ return nil
+ }
+
+ // Size is output minus block header.
+ bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+ if debugEncoder {
+ println("Rewriting block header", bh)
+ }
+ _ = bh.appendTo(b.output[bhOffset:bhOffset])
+ b.coders.setPrev(llEnc, mlEnc, ofEnc)
+ return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+ if len(b.sequences) == 0 {
+ // nothing to do
+ return
+ }
+ if len(b.sequences) > math.MaxUint16 {
+ panic("can only encode up to 64K sequences")
+ }
+ // No bounds checks after here:
+ llH := b.coders.llEnc.Histogram()
+ ofH := b.coders.ofEnc.Histogram()
+ mlH := b.coders.mlEnc.Histogram()
+ for i := range llH {
+ llH[i] = 0
+ }
+ for i := range ofH {
+ ofH[i] = 0
+ }
+ for i := range mlH {
+ mlH[i] = 0
+ }
+
+ var llMax, ofMax, mlMax uint8
+ for i := range b.sequences {
+ seq := &b.sequences[i]
+ v := llCode(seq.litLen)
+ seq.llCode = v
+ llH[v]++
+ if v > llMax {
+ llMax = v
+ }
+
+ v = ofCode(seq.offset)
+ seq.ofCode = v
+ ofH[v]++
+ if v > ofMax {
+ ofMax = v
+ }
+
+ v = mlCode(seq.matchLen)
+ seq.mlCode = v
+ mlH[v]++
+ if v > mlMax {
+ mlMax = v
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+ }
+ }
+ }
+ maxCount := func(a []uint32) int {
+ var max uint32
+ for _, v := range a {
+ if v > max {
+ max = v
+ }
+ }
+ return int(max)
+ }
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+ }
+ if debugAsserts && ofMax > maxOffsetBits {
+ panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+ }
+ if debugAsserts && llMax > maxLiteralLengthSymbol {
+ panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+ }
+
+ b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+ b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+ b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 00000000..01a01e48
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer
-type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT. + +package zstd + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 00000000..55a38855 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. 
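+ // The returned slice may alias dst or an internal buffer,
+ // so callers must copy if they retain the bytes.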
+ readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. + skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, io.ErrUnexpectedEOF + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := io.ReadFull(r.r, r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 00000000..0e59a242 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. 
+func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 00000000..6a5a2988 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,261 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. 
+ // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. + // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
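+//
+// A minimal sketch of typical use (frameData is a placeholder input):
+//
+//	var h Header
+//	rest, err := h.DecodeAndStrip(frameData)
+//	if err != nil {
+//		// not enough input, or not a zstd frame
+//	}
+//	_ = h.WindowSize // frame fields are now populated
+//	_ = rest         // bytes following the decoded headers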
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { + *h = Header{} + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { + return nil, ErrMagicMismatch + } + if len(in) < 4 { + return nil, io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return in[4:], nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return nil, errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return nil, io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch len(b) { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return nil, io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch len(b) { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return in, nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. 
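+ // The 3-byte little-endian block header packs: bit 0 = last block,
+ // bits 1-2 = block type, and the remaining bits the size. For RLE
+ // blocks that size is the repeat count; only one payload byte follows.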
+ cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return in, nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 00000000..bbca1723 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,948 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + dicts map[uint32]*dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. +// +// Only a single stream can be decoded concurrently, but the same decoder +// can run multiple concurrent stateless decodes. 
It is even possible to +// use stateless decodes while a stream is being decoded. +// +// The Reset function can be used to initiate a new stream, which will considerably +// reduce the allocations normally caused by NewReader. +func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { + initPredefined() + var d Decoder + d.o.setDefault() + for _, o := range opts { + err := o(&d.o) + if err != nil { + return nil, err + } + } + d.current.crc = xxhash.New() + d.current.flushed = true + + if r == nil { + d.current.err = ErrDecoderNilInput + } + + // Transfer option dicts. + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) + for _, dc := range d.o.dicts { + d.dicts[dc.id] = dc + } + d.o.dicts = nil + + // Create decoders + d.decoders = make(chan *blockDec, d.o.concurrent) + for i := 0; i < d.o.concurrent; i++ { + dec := newBlockDec(d.o.lowMem) + dec.localFrame = newFrameDec(d.o) + d.decoders <- dec + } + + if r == nil { + return &d, nil + } + return &d, d.Reset(r) +} + +// Read bytes from the decompressed stream into p. +// Returns the number of bytes written and any error that occurred. +// When the stream is done, io.EOF will be returned. +func (d *Decoder) Read(p []byte) (int, error) { + var n int + for { + if len(d.current.b) > 0 { + filled := copy(p, d.current.b) + p = p[filled:] + d.current.b = d.current.b[filled:] + n += filled + } + if len(p) == 0 { + break + } + if len(d.current.b) == 0 { + // We have an error and no more data + if d.current.err != nil { + break + } + if !d.nextBlock(n == 0) { + return n, d.current.err + } + } + } + if len(d.current.b) > 0 { + if debugDecoder { + println("returning", n, "still bytes left:", len(d.current.b)) + } + // Only return error at end of block + return n, nil + } + if d.current.err != nil { + d.drainOutput() + } + if debugDecoder { + println("returning", n, d.current.err, len(d.decoders)) + } + return n, d.current.err +} + +// Reset will reset the decoder the supplied stream after the current has finished processing. +// Note that this functionality cannot be used after Close has been called. +// Reset can be called with a nil reader to release references to the previous reader. +// After being called with a nil reader, no other operations than Reset or DecodeAll or Close +// should be used. +func (d *Decoder) Reset(r io.Reader) error { + if d.current.err == ErrDecoderClosed { + return d.current.err + } + + d.drainOutput() + + d.syncStream.br.r = nil + if r == nil { + d.current.err = ErrDecoderNilInput + if len(d.current.b) > 0 { + d.current.b = d.current.b[:0] + } + d.current.flushed = true + return nil + } + + // If bytes buffer and < 5MB, do sync decoding anyway. + if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { + bb2 := bb + if debugDecoder { + println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) + } + b := bb2.Bytes() + var dst []byte + if cap(d.syncStream.dstBuf) > 0 { + dst = d.syncStream.dstBuf[:0] + } + + dst, err := d.DecodeAll(b, dst) + if err == nil { + err = io.EOF + } + // Save output buffer + d.syncStream.dstBuf = dst + d.current.b = dst + d.current.err = err + d.current.flushed = true + if debugDecoder { + println("sync decode to", len(dst), "bytes, err:", err) + } + return nil + } + // Remove current block. + d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... 
+ d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
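+ // Receiving from d.decoders caps the number of concurrent DecodeAll
+ // calls; the deferred function below returns the decoder when done.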
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if err = d.setDict(frame); err != nil { + return nil, err + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { + d.current.crc.Write(next.b) + } + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { + if debugDecoder { + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + printf("CRC ok %08x\n", got) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } + if d.current.err != nil { + return false + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. 
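+//
+// A minimal lifecycle sketch (variable names are placeholders):
+//
+//	dec, err := NewReader(nil)
+//	if err != nil {
+//		// handle option error
+//	}
+//	defer dec.Close()
+//	out, err := dec.DecodeAll(compressed, nil)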
+func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
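+ // Cache the frame history buffer so its allocation can be handed to
+ // the executing goroutine and recovered when the stream ends.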
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxCompressedBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil { + err = d.setDict(frame) + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.hasCRC = false + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } + println("CRC missing?", err) + dec.err = err + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 00000000..774c5f00 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,169 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math/bits" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []*dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. 
+// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. +func WithDecoderConcurrency(n int) DOption { + return func(o *decoderOptions) error { + if n < 0 { + return errors.New("concurrency must be at least 1") + } + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } + return nil + } +} + +// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory +// non-streaming operations or maximum window size for streaming operations. +// This can be used to control memory usage of potentially hostile content. +// Maximum is 1 << 63 bytes. Default is 64GiB. +func WithDecoderMaxMemory(n uint64) DOption { + return func(o *decoderOptions) error { + if n == 0 { + return errors.New("WithDecoderMaxMemory must be at least 1") + } + if n > 1<<63 { + return errors.New("WithDecoderMaxmemory must be less than 1 << 63") + } + o.maxDecodedSize = n + return nil + } +} + +// WithDecoderDicts allows to register one or more dictionaries for the decoder. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format +func WithDecoderDicts(dicts ...[]byte) DOption { + return func(o *decoderOptions) error { + for _, b := range dicts { + d, err := loadDict(b) + if err != nil { + return err + } + o.dicts = append(o.dicts, d) + } + return nil + } +} + +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + +// WithDecoderMaxWindow allows to set a maximum window size for decodes. +// This allows rejecting packets that will cause big memory usage. +// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. +// If WithDecoderMaxMemory is set to a lower value, that will be used. +// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. +func WithDecoderMaxWindow(size uint64) DOption { + return func(o *decoderOptions) error { + if size < MinWindowSize { + return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") + } + if size > (1<<41)+7*(1<<38) { + return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") + } + o.maxWindowSize = size + return nil + } +} + +// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, +// or any size set in WithDecoderMaxMemory. +// This can be used to limit decoding to a specific maximum output size. +// Disabled by default. 
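+//
+// A minimal sketch (maxOut stands in for the caller's output bound):
+//
+//	dec, _ := NewReader(nil, WithDecodeAllCapLimit(true))
+//	dst := make([]byte, 0, maxOut)
+//	dst, err := dec.DecodeAll(input, dst) // ErrDecoderSizeExceeded if cap(dst) is too small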
+func WithDecodeAllCapLimit(b bool) DOption { + return func(o *decoderOptions) error { + o.limitToCap = b + return nil + } +} + +// WithDecodeBuffersBelow will fully decode readers that have a +// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. +// This typically uses less allocations but will have the full decompressed object in memory. +// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. +// Default is 128KiB. +func WithDecodeBuffersBelow(size int) DOption { + return func(o *decoderOptions) error { + o.decodeBufsBelow = size + return nil + } +} + +// IgnoreChecksum allows to forcibly ignore checksum checking. +func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 00000000..b7b83164 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,565 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + offsets [3]int + content []byte +} + +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if string(b[:4]) != dictMagic { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, fmt.Errorf("loading literal table: %w", err) + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. + DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) + } + } + printf := func(s string, args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) 
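Editor's note: `loadDict` above parses the `zstd --train` dictionary format (magic, literal Huffman table, three FSE tables, repeat offsets, then content), and `InspectDictionary` wraps it for read-only inspection. A hedged usage sketch; `dictionary.bin` is a placeholder path, not a file shipped with this project.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Placeholder: a dictionary produced by `zstd --train` or zstd.BuildDict.
	raw, err := os.ReadFile("dictionary.bin")
	if err != nil {
		log.Fatal(err)
	}
	info, err := zstd.InspectDictionary(raw)
	if err != nil {
		log.Fatal(err) // e.g. ErrMagicMismatch for non-dictionary input
	}
	fmt.Println("ID:", info.ID())
	fmt.Println("Content size:", info.ContentSize())
	fmt.Println("Initial repeat offsets:", info.Offsets())
	fmt.Println("Has literal encoder:", info.LitEncoder() != nil)
}
```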
+		}
+	}
+
+	if int64(len(hist)) > dictMaxLength {
+		return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
+	}
+	if len(hist) < 8 {
+		return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
+	}
+	if len(contents) == 0 {
+		return nil, errors.New("no content provided")
+	}
+	d := dict{
+		id:      o.ID,
+		litEnc:  nil,
+		llDec:   sequenceDec{},
+		ofDec:   sequenceDec{},
+		mlDec:   sequenceDec{},
+		offsets: o.Offsets,
+		content: hist,
+	}
+	block := blockEnc{lowMem: false}
+	block.init()
+	enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
+	if o.Level != 0 {
+		eOpts := encoderOptions{
+			level:      o.Level,
+			blockSize:  maxMatchLen,
+			windowSize: maxMatchLen,
+			dict:       &d,
+			lowMem:     false,
+		}
+		enc = eOpts.encoder()
+	} else {
+		o.Level = SpeedBestCompression
+	}
+	var (
+		remain [256]int
+		ll     [256]int
+		ml     [256]int
+		of     [256]int
+	)
+	addValues := func(dst *[256]int, src []byte) {
+		for _, v := range src {
+			dst[v]++
+		}
+	}
+	addHist := func(dst *[256]int, src *[256]uint32) {
+		for i, v := range src {
+			dst[i] += int(v)
+		}
+	}
+	seqs := 0
+	nUsed := 0
+	litTotal := 0
+	newOffsets := make(map[uint32]int, 1000)
+	for _, b := range contents {
+		block.reset(nil)
+		if len(b) < 8 {
+			continue
+		}
+		nUsed++
+		enc.Reset(&d, true)
+		enc.Encode(&block, b)
+		addValues(&remain, block.literals)
+		litTotal += len(block.literals)
+		if len(block.sequences) == 0 {
+			continue
+		}
+		seqs += len(block.sequences)
+		block.genCodes()
+		addHist(&ll, block.coders.llEnc.Histogram())
+		addHist(&ml, block.coders.mlEnc.Histogram())
+		addHist(&of, block.coders.ofEnc.Histogram())
+		for i, seq := range block.sequences {
+			if i > 3 {
+				break
+			}
+			offset := seq.offset
+			if offset == 0 {
+				continue
+			}
+			if int(offset) >= len(o.History) {
+				continue
+			}
+			if offset > 3 {
+				newOffsets[offset-3]++
+			} else {
+				newOffsets[uint32(o.Offsets[offset-1])]++
+			}
+		}
+	}
+	// Find most used offsets.
+	var sortedOffsets []uint32
+	for k := range newOffsets {
+		sortedOffsets = append(sortedOffsets, k)
+	}
+	sort.Slice(sortedOffsets, func(i, j int) bool {
+		a, b := sortedOffsets[i], sortedOffsets[j]
+		if newOffsets[a] == newOffsets[b] {
+			// Prefer the longer offset on equal use counts.
+			return a > b
+		}
+		return newOffsets[a] > newOffsets[b]
+	})
+	if len(sortedOffsets) > 3 {
+		if debug {
+			print("Offsets:")
+			for i, v := range sortedOffsets {
+				if i > 20 {
+					break
+				}
+				printf("[%d: %d],", v, newOffsets[v])
+			}
+			println("")
+		}
+
+		sortedOffsets = sortedOffsets[:3]
+	}
+	for i, v := range sortedOffsets {
+		o.Offsets[i] = int(v)
+	}
+	if debug {
+		println("New repeat offsets", o.Offsets)
+	}
+
+	if nUsed == 0 || seqs == 0 {
+		return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
+	}
+	if debug {
+		println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
+	}
+	if seqs/nUsed < 512 {
+		// Use 512 as minimum.
+		nUsed = seqs / 512
+		if nUsed == 0 {
+			nUsed = 1
+		}
+	}
+	copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
+		hist := dst.Histogram()
+		var maxSym uint8
+		var maxCount int
+		var fakeLength int
+		for i, v := range src {
+			if v > 0 {
+				v = v / nUsed
+				if v == 0 {
+					v = 1
+				}
+			}
+			if v > maxCount {
+				maxCount = v
+			}
+			if v != 0 {
+				maxSym = uint8(i)
+			}
+			fakeLength += v
+			hist[i] = uint32(v)
+		}
+
+		// Ensure we aren't trying to represent RLE.
+ if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := litTotal + if avgSize > huff0.BlockSizeMax/2 { + avgSize = huff0.BlockSizeMax / 2 + } + huffBuff := make([]byte, 0, avgSize) + // Target size + div := litTotal / avgSize + if div < 1 { + div = 1 + } + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := 0; tries < 255; tries++ { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := 0; i < 128; i++ { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
+			}
+		}
+	}
+
+	var out bytes.Buffer
+	out.Write([]byte(dictMagic))
+	out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
+	out.Write(scratch.OutTable)
+	if debug {
+		println("huff table:", len(scratch.OutTable), "bytes")
+		println("of table:", len(ofTable), "bytes")
+		println("ml table:", len(mlTable), "bytes")
+		println("ll table:", len(llTable), "bytes")
+	}
+	out.Write(ofTable)
+	out.Write(mlTable)
+	out.Write(llTable)
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
+	out.Write(hist)
+	if debug {
+		_, err := loadDict(out.Bytes())
+		if err != nil {
+			panic(err)
+		}
+		i, err := InspectDictionary(out.Bytes())
+		if err != nil {
+			panic(err)
+		}
+		println("ID:", i.ID())
+		println("Content size:", i.ContentSize())
+		println("Encoder:", i.LitEncoder() != nil)
+		println("Offsets:", i.Offsets())
+		var totalSize int
+		for _, b := range contents {
+			totalSize += len(b)
+		}
+
+		encWith := func(opts ...EOption) int {
+			enc, err := NewWriter(nil, opts...)
+			if err != nil {
+				panic(err)
+			}
+			defer enc.Close()
+			var dst []byte
+			var totalSize int
+			for _, b := range contents {
+				dst = enc.EncodeAll(b, dst[:0])
+				totalSize += len(dst)
+			}
+			return totalSize
+		}
+		plain := encWith(WithEncoderLevel(o.Level))
+		withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
+		println("Input size:", totalSize)
+		println("Plain Compressed:", plain)
+		println("Dict Compressed:", withDict)
+		println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
+	}
+	return out.Bytes(), nil
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
new file mode 100644
index 00000000..5ca46038
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -0,0 +1,173 @@
+package zstd
+
+import (
+	"fmt"
+	"math/bits"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+const (
+	dictShardBits = 6
+)
+
+type fastBase struct {
+	// cur is the offset at the start of hist
+	cur int32
+	// maximum offset. Should be at least 2x block size.
+	maxMatchOff int32
+	bufferReset int32
+	hist        []byte
+	crc         *xxhash.Digest
+	tmp         [8]byte
+	blk         *blockEnc
+	lastDictID  uint32
+	lowMem      bool
+}
+
+// CRC returns the underlying CRC writer.
+func (e *fastBase) CRC() *xxhash.Digest {
+	return e.crc
+}
+
+// AppendCRC will append the CRC to the destination slice and return it.
+func (e *fastBase) AppendCRC(dst []byte) []byte {
+	crc := e.crc.Sum(e.tmp[:0])
+	dst = append(dst, crc[7], crc[6], crc[5], crc[4])
+	return dst
+}
+
+// WindowSize returns the window size of the encoder,
+// or a window size small enough to contain the input size, if > 0.
+func (e *fastBase) WindowSize(size int64) int32 {
+	if size > 0 && size < int64(e.maxMatchOff) {
+		b := int32(1) << uint(bits.Len(uint(size)))
+		// Keep minimum window.
+		if b < 1024 {
+			b = 1024
+		}
+		return b
+	}
+	return e.maxMatchOff
+}
+
+// Block returns the current block.
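Editor's note: `BuildDict`, which concludes just above, assembles the same layout `loadDict` parses: magic, dictionary ID, literal table, the three FSE tables, repeat offsets, then history. A sketch of calling it; the two samples and the `metrics.dict` output path are illustrative assumptions (real dictionaries need many representative inputs, and `History` must be at least 8 bytes).

```go
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	samples := [][]byte{
		[]byte(`{"name":"up","value":1,"timestamp":"2020-01-01T00:00:00Z"}`),
		[]byte(`{"name":"up","value":0,"timestamp":"2020-01-01T00:00:30Z"}`),
	}
	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1234,            // any non-zero dictionary ID
		Contents: samples,         // blocks the table statistics are gathered from
		History:  samples[0],      // shared history; must be >= 8 bytes
		Offsets:  [3]int{1, 4, 8}, // conventional starting repeat offsets
		Level:    zstd.SpeedBestCompression,
	})
	if err != nil {
		log.Fatal(err) // fails if no sequences were found in the samples
	}
	if err := os.WriteFile("metrics.dict", dict, 0o644); err != nil {
		log.Fatal(err)
	}
}
```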
+func (e *fastBase) Block() *blockEnc {
+	return e.blk
+}
+
+func (e *fastBase) addBlock(src []byte) int32 {
+	if debugAsserts && e.cur > e.bufferReset {
+		panic(fmt.Sprintf("e.cur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
+	}
+	// check if we have space already
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			e.ensureHist(len(src))
+		} else {
+			if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) {
+				panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff))
+			}
+			// Move down
+			offset := int32(len(e.hist)) - e.maxMatchOff
+			copy(e.hist[0:e.maxMatchOff], e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:e.maxMatchOff]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+// ensureHist will ensure that history can keep at least this many bytes.
+func (e *fastBase) ensureHist(n int) {
+	if cap(e.hist) >= n {
+		return
+	}
+	l := e.maxMatchOff
+	if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize {
+		l += maxCompressedBlockSize
+	} else {
+		l += e.maxMatchOff
+	}
+	// Make it at least 1MB.
+	if l < 1<<20 && !e.lowMem {
+		l = 1 << 20
+	}
+	// Make it at least the requested size.
+	if l < int32(n) {
+		l = int32(n)
+	}
+	e.hist = make([]byte, 0, l)
+}
+
+// UseBlock will replace the block with the provided one,
+// but transfer recent offsets from the previous.
+func (e *fastBase) UseBlock(enc *blockEnc) {
+	enc.reset(e.blk)
+	e.blk = enc
+}
+
+func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
+	if debugAsserts {
+		if s < 0 {
+			err := fmt.Sprintf("s (%d) < 0", s)
+			panic(err)
+		}
+		if t < 0 {
+			err := fmt.Sprintf("t (%d) < 0", t)
+			panic(err)
+		}
+		if s-t > e.maxMatchOff {
+			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff)
+			panic(err)
+		}
+		if len(src)-int(s) > maxCompressedBlockSize {
+			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
+		}
+	}
+	return int32(matchLen(src[s:], src[t:]))
+}
+
+// Reset the encoding table.
+func (e *fastBase) resetBase(d *dict, singleBlock bool) {
+	if e.blk == nil {
+		e.blk = &blockEnc{lowMem: e.lowMem}
+		e.blk.init()
+	} else {
+		e.blk.reset(nil)
+	}
+	e.blk.initNewEncode()
+	if e.crc == nil {
+		e.crc = xxhash.New()
+	} else {
+		e.crc.Reset()
+	}
+	e.blk.dictLitEnc = nil
+	if d != nil {
+		low := e.lowMem
+		if singleBlock {
+			e.lowMem = true
+		}
+		e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
+		e.lowMem = low
+	}
+
+	// We offset current position so everything will be out of reach.
+	// If above reset line, history will be purged.
+	if e.cur < e.bufferReset {
+		e.cur += e.maxMatchOff + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+	if d != nil {
+		// Set offsets (currently not used)
+		for i, off := range d.offsets {
+			e.blk.recentOffsets[i] = uint32(off)
+			e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i]
+		}
+		// Transfer litenc.
+		e.blk.dictLitEnc = d.litEnc
+		e.hist = append(e.hist, d.content...)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
new file mode 100644
index 00000000..4613724e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -0,0 +1,560 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
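Editor's note: `enc_best.go`, which starts below, backs the slowest, strongest encoder level. For orientation, a sketch of how callers select levels; the mapping of the first two levels to files is an assumption (only `enc_better.go` and `enc_best.go` appear in this part of the patch).

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	payload := []byte("metric payloads compress well when repetitive; " +
		"metric payloads compress well when repetitive")

	for _, lvl := range []zstd.EncoderLevel{
		zstd.SpeedFastest,           // assumed: enc_fast.go
		zstd.SpeedDefault,           // assumed: enc_dfast.go
		zstd.SpeedBetterCompression, // betterFastEncoder, further below
		zstd.SpeedBestCompression,   // bestFastEncoder, below
	} {
		enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(lvl))
		if err != nil {
			log.Fatal(err)
		}
		out := enc.EncodeAll(payload, nil)
		enc.Close()
		fmt.Printf("%-20v %d -> %d bytes\n", lvl, len(payload), len(out))
	}
}
```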
+
+package zstd
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/klauspost/compress"
+)
+
+const (
+	bestLongTableBits = 22                     // Bits used in the long match table
+	bestLongTableSize = 1 << bestLongTableBits // Size of the table
+	bestLongLen       = 8                      // Bytes used for table hash
+
+	// Note: Increasing the short table bits or making the hash shorter
+	// can actually lead to compression degradation since it will 'steal' more from the
+	// long match table and match offsets are quite big.
+	// This greatly depends on the type of input.
+	bestShortTableBits = 18                      // Bits used in the short match table
+	bestShortTableSize = 1 << bestShortTableBits // Size of the table
+	bestShortLen       = 4                       // Bytes used for table hash
+
+)
+
+type match struct {
+	offset int32
+	s      int32
+	length int32
+	rep    int32
+	est    int32
+}
+
+const highScore = maxMatchLen * 8
+
+// estBits will estimate output bits from predefined tables.
+func (m *match) estBits(bitsPerByte int32) {
+	mlc := mlCode(uint32(m.length - zstdMinMatch))
+	var ofc uint8
+	if m.rep < 0 {
+		ofc = ofCode(uint32(m.s-m.offset) + 3)
+	} else {
+		ofc = ofCode(uint32(m.rep) & 3)
+	}
+	// Cost of the match encoding, excluding literals.
+	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
+
+	// Add cost of match encoding...
+	m.est = int32(ofTT.outBits + mlTT.outBits)
+	m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16)
+	// Subtract savings compared to literal encoding...
+	m.est -= (m.length * bitsPerByte) >> 10
+	if m.est > 0 {
+		// Unlikely gain..
+		m.length = 0
+		m.est = highScore
+	}
+}
+
+// bestFastEncoder uses 2 tables, one for short matches (4 bytes) and one for long matches.
+// The long match table contains the previous entry with the same hash,
+// effectively making it a "chain" of length 2.
+// When we find a long match we choose between the two values and select the longest.
+// When we find a short match, after checking the long, we check if we can find a long at n+1
+// and that it is longer (lazy matching).
+type bestFastEncoder struct {
+	fastBase
+	table         [bestShortTableSize]prevEntry
+	longTable     [bestLongTableSize]prevEntry
+	dictTable     []prevEntry
+	dictLongTable []prevEntry
+}
+
+// Encode improves compression...
+func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
+	const (
+		// Input margin is the number of bytes we read (8)
+		// and the maximum we will read ahead (4)
+		inputMargin            = 8 + 4
+		minNonLiteralBlockSize = 16
+	)
+
+	// Protect against e.cur wraparound.
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
+		if len(e.hist) == 0 {
+			e.table = [bestShortTableSize]prevEntry{}
+			e.longTable = [bestLongTableSize]prevEntry{}
+			e.cur = e.maxMatchOff
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. + bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + const goodEnough = 250 + + cv := load6432(src, s) + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + return + } + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if m.rep <= 0 { + // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
+ tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } + } + + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) + + if canRepeat && best.length < goodEnough { + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... + if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + continue + } + + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) + // Long at s+1, s+2 + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) + if false { + // Short at s+3. + // Too often worse... + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) + } + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. 
+ if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } + } + } + } + } + + if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + s = best.s + if best.rep > 0 { + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + addLiterals(&seq, best.s) + + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) + if debugSequences { + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) + } + blk.sequences = append(blk.sequences, seq) + + // Index old s + 1 -> s - 1 + s = best.s + best.length + nextEmit = s + + // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + + switch best.rep { + case 2, 4 | 1: + offset1, offset2 = offset2, offset1 + case 3, 4 | 2: + offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop + } + continue + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Write our sequence + var seq seq + l := best.length + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 + } + + off := index0 + e.cur + for index0 < end { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + off++ + } + if s >= sLimit { + break encodeLoop + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 00000000..84a79fde --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1252 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. + betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + // Add block to history + s := e.addBlock(src) + blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
+ prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. 
+// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched, index0 int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + index0 = s + 1 + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. 
+ start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + s += length + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + s += length + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. 
+ matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + off += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
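+ // Bytes after the final sequence travel as raw literals; extraLits
+ // tells the block writer how many are not covered by any sequence.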
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + 
for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 00000000..d36be7bd --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1123 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
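+ // The effective skip also grows with the distance since the last emit
+ // (the kSearchStrength shift in the search loop), so this is only the floor.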
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
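+ // Matching against offset2 promotes it to the most recent offset,
+ // mirroring the repeat-code history update in the zstd format.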
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
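+ // Entries still within maxMatchOff of the window are rebased onto the
+ // reset e.cur; anything older is zeroed so it can never match again.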
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
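+ // Heuristic: past ~64K of input so many shards have typically been
+ // touched that per-shard tracking no longer beats a full table restore.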
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 00000000..f45a3da7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,891 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
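+ // 131074 is the longest match a single zstd sequence can encode
+ // (match-length code 52 with all 16 extra bits set).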
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
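+ // (Zero-literal sequences shift the repeat-offset numbering in the
+ // zstd format, which this fast path does not want to handle.)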
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxCompressedBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= e.bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < e.bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= e.bufferReset-int32(len(e.hist)) { + if len(e.hist) == 0 { + e.table = [tableSize]tableEntry{} + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 2 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 00000000..a79c4a52 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,622 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + "math" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
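+//
+// A minimal streaming sketch (illustrative only, not part of the upstream
+// documentation; w is any io.Writer and data is a []byte):
+//
+//	enc, err := NewWriter(w)
+//	if err != nil {
+//		// handle err
+//	}
+//	if _, err := enc.Write(data); err != nil {
+//		// handle err
+//	}
+//	err = enc.Close() // flushes remaining output and writes the CRC if enabled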
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst := fh.appendTo(tmp[:0]) + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. + s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. 
+ enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. +// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. 
+// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst = fh.appendTo(dst) + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + oldout := blk.output + // Output directly to dst + blk.output = dst + + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = blk.output + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { + panic(err) + } + dst = append(dst, blk.output...) + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + var err error + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. 
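+ // Each zstd block starts with a 3-byte block header, so even fully
+ // incompressible input adds at most 3 bytes per block on top of the payload.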
+ // There cannot be 0 blocks.
+ blocks := (size + e.o.blockSize) / e.o.blockSize
+
+ // Combine, add padding.
+ maxSz := frameHeader + 3*blocks + size
+ if e.o.pad > 1 {
+ maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
+ }
+ return maxSz
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
new file mode 100644
index 00000000..20671dcb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -0,0 +1,339 @@
+package zstd
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+ "runtime"
+ "strings"
+)
+
+// EOption is an option for creating an encoder.
+type EOption func(*encoderOptions) error
+
+// encoderOptions retains accumulated state of multiple options.
+type encoderOptions struct {
+ concurrent int
+ level EncoderLevel
+ single *bool
+ pad int
+ blockSize int
+ windowSize int
+ crc bool
+ fullZero bool
+ noEntropy bool
+ allLitEntropy bool
+ customWindow bool
+ customALEntropy bool
+ customBlockSize bool
+ lowMem bool
+ dict *dict
+}
+
+func (o *encoderOptions) setDefault() {
+ *o = encoderOptions{
+ concurrent: runtime.GOMAXPROCS(0),
+ crc: true,
+ single: nil,
+ blockSize: maxCompressedBlockSize,
+ windowSize: 8 << 20,
+ level: SpeedDefault,
+ allLitEntropy: false,
+ lowMem: false,
+ }
+}
+
+// encoder returns an encoder with the selected options.
+func (o encoderOptions) encoder() encoder {
+ switch o.level {
+ case SpeedFastest:
+ if o.dict != nil {
+ return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+ }
+ return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+
+ case SpeedDefault:
+ if o.dict != nil {
+ return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
+ }
+ return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+ case SpeedBetterCompression:
+ if o.dict != nil {
+ return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
+ }
+ return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+ case SpeedBestCompression:
+ return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
+ }
+ panic("unknown compression level")
+}
+
+// WithEncoderCRC will add CRC value to output.
+// Output will be 4 bytes larger.
+func WithEncoderCRC(b bool) EOption {
+ return func(o *encoderOptions) error { o.crc = b; return nil }
+}
+
+// WithEncoderConcurrency will set the concurrency,
+// meaning the maximum number of encoders to run concurrently.
+// The value supplied must be at least 1.
+// For streams, setting a value of 1 will disable async compression.
+// By default this will be set to GOMAXPROCS.
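+//
+// For example (illustrative only, not part of the upstream documentation):
+// NewWriter(w, WithEncoderConcurrency(1)) yields fully synchronous,
+// single-goroutine stream compression.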
+func WithEncoderConcurrency(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("concurrency must be at least 1")
+ }
+ o.concurrent = n
+ return nil
+ }
+}
+
+// WithWindowSize will set the maximum allowed back-reference distance.
+// The value must be a power of two between MinWindowSize and MaxWindowSize.
+// A larger value will enable better compression but allocate more memory and,
+// for above-default values, take considerably longer.
+// The default value is determined by the compression level and is at most 8MB.
+func WithWindowSize(n int) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case n < MinWindowSize:
+ return fmt.Errorf("window size must be at least %d", MinWindowSize)
+ case n > MaxWindowSize:
+ return fmt.Errorf("window size must be at most %d", MaxWindowSize)
+ case (n & (n - 1)) != 0:
+ return errors.New("window size must be a power of 2")
+ }
+
+ o.windowSize = n
+ o.customWindow = true
+ if o.blockSize > o.windowSize {
+ o.blockSize = o.windowSize
+ o.customBlockSize = true
+ }
+ return nil
+ }
+}
+
+// WithEncoderPadding will add padding to all output so the size will be a multiple of n.
+// This can be used to obfuscate the exact output size or make blocks of a certain size.
+// The contents will be a skippable frame, so it will be invisible to the decoder.
+// n must be > 0 and <= 1GB, 1<<30 bytes.
+// The padded area will be filled with data from crypto/rand.Reader.
+// If `EncodeAll` is used with data already in the destination, the total size will be a multiple of this.
+func WithEncoderPadding(n int) EOption {
+ return func(o *encoderOptions) error {
+ if n <= 0 {
+ return fmt.Errorf("padding must be at least 1")
+ }
+ // No need to waste our time.
+ if n == 1 {
+ n = 0
+ }
+ if n > 1<<30 {
+ return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+ }
+ o.pad = n
+ return nil
+ }
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+ speedNotSet EncoderLevel = iota
+
+ // SpeedFastest will choose the fastest reasonable compression.
+ // This is roughly equivalent to the fastest Zstandard mode.
+ SpeedFastest
+
+ // SpeedDefault is the default "pretty fast" compression option.
+ // This is roughly equivalent to the default Zstandard mode (level 3).
+ SpeedDefault
+
+ // SpeedBetterCompression will yield better compression than the default.
+ // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+ // If you use this, be aware that CPU usage may go up in the future.
+ SpeedBetterCompression
+
+ // SpeedBestCompression will choose the best available compression option.
+ // This will offer the best compression no matter the CPU cost.
+ SpeedBestCompression
+
+ // speedLast should be kept as the last actual compression option.
+ // This is not for external usage, but is used to keep track of the valid options.
+ speedLast
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is not case sensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
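+//
+// For example (illustrative only, not part of the upstream documentation):
+//
+//	ok, level := EncoderLevelFromString("BEST")   // true, SpeedBestCompression
+//	_, fallback := EncoderLevelFromString("turbo") // false, SpeedDefault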
+func EncoderLevelFromString(s string) (bool, EncoderLevel) { + for l := speedNotSet + 1; l < speedLast; l++ { + if strings.EqualFold(s, l.String()) { + return true, l + } + } + return false, SpeedDefault +} + +// EncoderLevelFromZstd will return an encoder level that closest matches the compression +// ratio of a specific zstd compression level. +// Many input values will provide the same compression level. +func EncoderLevelFromZstd(level int) EncoderLevel { + switch { + case level < 3: + return SpeedFastest + case level >= 3 && level < 6: + return SpeedDefault + case level >= 6 && level < 10: + return SpeedBetterCompression + default: + return SpeedBestCompression + } +} + +// String provides a string representation of the compression level. +func (e EncoderLevel) String() string { + switch e { + case SpeedFastest: + return "fastest" + case SpeedDefault: + return "default" + case SpeedBetterCompression: + return "better" + case SpeedBestCompression: + return "best" + default: + return "invalid" + } +} + +// WithEncoderLevel specifies a predefined compression level. +func WithEncoderLevel(l EncoderLevel) EOption { + return func(o *encoderOptions) error { + switch { + case l <= speedNotSet || l >= speedLast: + return fmt.Errorf("unknown encoder level") + } + o.level = l + if !o.customWindow { + switch o.level { + case SpeedFastest: + o.windowSize = 4 << 20 + if !o.customBlockSize { + o.blockSize = 1 << 16 + } + case SpeedDefault: + o.windowSize = 8 << 20 + case SpeedBetterCompression: + o.windowSize = 8 << 20 + case SpeedBestCompression: + o.windowSize = 8 << 20 + } + } + if !o.customALEntropy { + o.allLitEntropy = l > SpeedDefault + } + + return nil + } +} + +// WithZeroFrames will encode 0 length input as full frames. +// This can be needed for compatibility with zstandard usage, +// but is not needed for this package. +func WithZeroFrames(b bool) EOption { + return func(o *encoderOptions) error { + o.fullZero = b + return nil + } +} + +// WithAllLitEntropyCompression will apply entropy compression if no matches are found. +// Disabling this will skip incompressible data faster, but in cases with no matches but +// skewed character distribution compression is lost. +// Default value depends on the compression level selected. +func WithAllLitEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.customALEntropy = true + o.allLitEntropy = b + return nil + } +} + +// WithNoEntropyCompression will always skip entropy compression of literals. +// This can be useful if content has matches, but unlikely to benefit from entropy +// compression. Usually the slight speed improvement is not worth enabling this. +func WithNoEntropyCompression(b bool) EOption { + return func(o *encoderOptions) error { + o.noEntropy = b + return nil + } +} + +// WithSingleSegment will set the "single segment" flag when EncodeAll is used. +// If this flag is set, data must be regenerated within a single continuous memory segment. +// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. +// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. +// In order to preserve the decoder from unreasonable memory requirements, +// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. +// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. 
+// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.single = &b
+ return nil
+ }
+}
+
+// WithLowerEncoderMem will, in some cases, reduce memory usage at the cost of
+// slower encoding speed.
+// This will not change the window size, which is the primary means of reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.lowMem = b
+ return nil
+ }
+}
+
+// WithEncoderDict allows registering a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
+func WithEncoderDict(dict []byte) EOption {
+ return func(o *encoderOptions) error {
+ d, err := loadDict(dict)
+ if err != nil {
+ return err
+ }
+ o.dict = d
+ return nil
+ }
+}
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
+func WithEncoderDictRaw(id uint32, content []byte) EOption {
+ return func(o *encoderOptions) error {
+ if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+ return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+ }
+ o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 00000000..e47af66e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,415 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "io"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
+)
+
+type frameDec struct {
+ o decoderOptions
+ crc *xxhash.Digest
+
+ WindowSize uint64
+
+ // Frame history passed between blocks
+ history history
+
+ rawInput byteBuffer
+
+ // Byte buffer that can be reused for small input blocks.
+ bBuf byteBuf
+
+ FrameContentSize uint64
+
+ DictionaryID uint32
+ HasCheckSum bool
+ SingleSegment bool
+}
+
+const (
+ // MinWindowSize is the minimum Window Size, which is 1 KB.
+ MinWindowSize = 1 << 10
+
+ // MaxWindowSize is the maximum encoder window size
+ // and the default decoder maximum window size.
+ MaxWindowSize = 1 << 29
+)
+
+const (
+ frameMagic = "\x28\xb5\x2f\xfd"
+ skippableFrameMagic = "\x2a\x4d\x18"
+)
+
+func newFrameDec(o decoderOptions) *frameDec {
+ if o.maxWindowSize > o.maxDecodedSize {
+ o.maxWindowSize = o.maxDecodedSize
+ }
+ d := frameDec{
+ o: o,
+ }
+ return &d
+}
+
+// reset will read the frame header and prepare for block decoding.
+// If nothing can be read from the input, io.EOF will be returned.
+// Any other error indicates that the stream contained data, but
+// there was a problem.
+func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + case nil: + signature[0] = b[0] + default: + return err + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + case nil: + copy(signature[1:], b) + default: + return err + } + + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) + } + // Break if not skippable frame. + break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if string(signature[:]) != frameMagic { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = 0 + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch len(b) { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + d.DictionaryID = id + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch len(b) { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
+ d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. + d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or window size below 2MB. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + if d.o.lowMem { + // Alloc with 1MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 + } else { + // Alloc with 2MB extra. + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum, assuming the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + // We can overwrite upper tmp now + buf, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) + + if got != want { + if debugDecoder { + printf("CRC check failed: got %08x, want %08x\n", got, want) + } + return ErrCRCMismatch + } + if debugDecoder { + printf("CRC ok %08x\n", got) + } + return nil +} + +// consumeCRC skips over the checksum, assuming the frame has one. +func (d *frameDec) consumeCRC() error { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + } + return err +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. 
+ d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. + crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 00000000..667ca067 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) []byte { + dst = append(dst, frameMagic...) 
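+
+ // Frame_Header_Descriptor bit layout (per RFC 8878): bits 6-7 carry the
+ // Frame_Content_Size field size, bit 5 Single_Segment, bit 2
+ // Content_Checksum, and bits 0-1 the Dictionary_ID field size.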
+ var fhd uint8
+ if f.Checksum {
+ fhd |= 1 << 2
+ }
+ if f.SingleSegment {
+ fhd |= 1 << 5
+ }
+
+ var dictIDContent []byte
+ if f.DictID > 0 {
+ var tmp [4]byte
+ if f.DictID < 256 {
+ fhd |= 1
+ tmp[0] = uint8(f.DictID)
+ dictIDContent = tmp[:1]
+ } else if f.DictID < 1<<16 {
+ fhd |= 2
+ binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+ dictIDContent = tmp[:2]
+ } else {
+ fhd |= 3
+ binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+ dictIDContent = tmp[:4]
+ }
+ }
+ var fcs uint8
+ if f.ContentSize >= 256 {
+ fcs++
+ }
+ if f.ContentSize >= 65536+256 {
+ fcs++
+ }
+ if f.ContentSize >= 0xffffffff {
+ fcs++
+ }
+
+ fhd |= fcs << 6
+
+ dst = append(dst, fhd)
+ if !f.SingleSegment {
+ const winLogMin = 10
+ windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+ dst = append(dst, uint8(windowLog))
+ }
+ if f.DictID > 0 {
+ dst = append(dst, dictIDContent...)
+ }
+ switch fcs {
+ case 0:
+ if f.SingleSegment {
+ dst = append(dst, uint8(f.ContentSize))
+ }
+ // Unless SingleSegment is set, frame sizes < 256 are not stored.
+ case 1:
+ f.ContentSize -= 256
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+ case 2:
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+ case 3:
+ dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+ uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+ default:
+ panic("invalid fcs")
+ }
+ return dst
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return the total size to be added so that written
+// is divisible by wantMultiple.
+// The value will either be 0 or at least skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+ if wantMultiple <= 0 {
+ panic("wantMultiple <= 0")
+ }
+ if written < 0 {
+ panic("written < 0")
+ }
+ leftOver := written % wantMultiple
+ if leftOver == 0 {
+ return 0
+ }
+ toAdd := wantMultiple - leftOver
+ for toAdd < skippableFrameHeader {
+ toAdd += wantMultiple
+ }
+ return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of total bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+ if total == 0 {
+ return dst, nil
+ }
+ if total < skippableFrameHeader {
+ return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+ }
+ if int64(total) > math.MaxUint32 {
+ return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+ }
+ dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+ f := uint32(total - skippableFrameHeader)
+ dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+ start := len(dst)
+ dst = append(dst, make([]byte, f)...)
+ _, err := io.ReadFull(r, dst[start:])
+ return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 00000000..2f8860a7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,307 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++
+ previous0 = count == 0
+ for remaining < threshold {
+ nbBits--
+ threshold >>= 1
+ }
+
+ if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+ b.advance(bitCount >> 3)
+ bitCount &= 7
+ // The check above should make sure we can read 32 bits
+ bitStream = b.Uint32NC() >> (bitCount & 31)
+ } else {
+ bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+ b.off = len(b.b) - 4
+ bitStream = b.Uint32() >> (bitCount & 31)
+ }
+ }
+ s.symbolLen = charnum
+ if s.symbolLen <= 1 {
+ return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+ }
+ if s.symbolLen > maxSymbolValue+1 {
+ return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+ }
+ if remaining != 1 {
+ return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+ }
+ if bitCount > 32 {
+ return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+ }
+ if gotTotal != 1<<s.actualTableLog {
+ return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+ }
+ b.advance((bitCount + 7) >> 3)
+ return s.buildDtable()
+}
+
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+ fatalErr := func(err error) {
+ if err != nil {
+ panic(err)
+ }
+ }
+ // dt [maxTablesize]decSymbol // Decompression table.
+ // symbolLen uint16 // Length of active part of the symbol table.
+ // actualTableLog uint8 // Selected tablelog.
+ // maxBits uint8 // Maximum number of additional bits
+ // // used for table creation to avoid allocations.
+ // stateTable [256]uint16
+ // norm [maxSymbolValue + 1]int16
+ // preDefined bool
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+ fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
+// decSymbol contains information about a state entry.
+// It includes the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+ return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+ return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+ return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+ return uint16(d >> 16)
+}
+
+func (d decSymbol) baselineInt() int {
+ return int(d >> 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+ const mask = 0xffffffffffffff00
+ *d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+ const mask = 0xffffffffffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+ const mask = 0xffffffff0000ffff
+ *d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+ const mask = 0xffff00ff
+ *d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+ if int(symb) >= len(t) {
+ return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+ }
+ lu := t[symb]
+ return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 00000000..d04a829b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 00000000..bcde3986 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 00000000..8adfebb0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,73 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + v = 1 + } + symbolNext[i] = uint16(v) + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + for { + // lowprob area + position = (position + step) & tableMask + if position <= highThreshold { + break + } + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 00000000..ab26326a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. 
+func (s symbolTransform) String() string { + return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) +} + +// Histogram returns the histogram buffer so it can be populated directly, +// skipping that step during compression. +// It also allows inspecting the histogram once compression is done. +// To indicate that you have populated the histogram, call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +func (s *fseEncoder) Histogram() *[256]uint32 { + return &s.count +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// allocCtable will allocate tables needed for compression. +// If existing tables are big enough, they are simply re-used. +func (s *fseEncoder) allocCtable() { + tableSize := 1 << s.actualTableLog + // get tableSymbol that is big enough. + if cap(s.ct.tableSymbol) < tableSize { + s.ct.tableSymbol = make([]byte, tableSize) + } + s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] + + ctSize := tableSize + if cap(s.ct.stateTable) < ctSize { + s.ct.stateTable = make([]uint16, ctSize) + } + s.ct.stateTable = s.ct.stateTable[:ctSize] + + if cap(s.ct.symbolTT) < 256 { + s.ct.symbolTT = make([]symbolTransform, 256) + } + s.ct.symbolTT = s.ct.symbolTT[:256] +} + +// buildCTable will populate the compression table so it is ready to be used. +func (s *fseEncoder) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [256]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output.
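+ // e.g. with actualTableLog=6 the table has 64 slots, so largeLimit=32; + // a normalized count above half the table size corresponds to a symbol + // probability above 50%, which can produce zero-bit states.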
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
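+// Counts are scaled so the normalized values sum to 1<<actualTableLog; symbols +// whose count is at or below length>>tableLog get the special value -1, and any +// rounding surplus or deficit is settled on the most probable symbol (or via +// normalizeCount2 in corner cases).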
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *fseEncoder) optimalTableLog(length int) { + tableLog := uint8(maxEncTableLog) + minBitsSrc := highBit(uint32(length)) + 1 + minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 + minBits := uint8(minBitsSymbols) + if minBitsSrc < minBitsSymbols { + minBits = uint8(minBitsSrc) + } + + maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minEncTablelog { + tableLog = minEncTablelog + } + if tableLog > maxEncTableLog { + tableLog = maxEncTableLog + } + s.actualTableLog = tableLog +} + +// validateNorm validates the normalized histogram table. +func (s *fseEncoder) validateNorm() (err error) { + var total int + for _, v := range s.norm[:s.symbolLen] { + if v >= 0 { + total += int(v) + } else { + total -= int(v) + } + } + defer func() { + if err == nil { + return + } + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) + for i, v := range s.norm[:s.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) + } + }() + if total != (1 << s.actualTableLog) { + return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) + } + for i, v := range s.norm[s.symbolLen:] { + if v != 0 { + return fmt.Errorf("warning: Found symbol out of range, %d after cut", i) + } + } + return nil +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. +func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { + if s.useRLE { + return append(out, s.rleVal), nil + } + if s.preDefined { + // Never write predefined. + return out, nil + } + + var ( + tableLog = s.actualTableLog + tableSize = 1 << tableLog + previous0 bool + charnum uint16 + + maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2 + + // Write Table Size + bitStream = uint32(tableLog - minEncTablelog) + bitCount = uint(4) + remaining = int16(tableSize + 1) /* +1 for extra accuracy */ + threshold = int16(tableSize) + nbBits = uint(tableLog + 1) + outP = len(out) + ) + if cap(out) < outP+maxHeaderSize { + out = append(out, make([]byte, maxHeaderSize*3)...) + out = out[:len(out)-maxHeaderSize*3] + } + out = out[:outP+maxHeaderSize] + + // stops at 1 + for remaining > 1 { + if previous0 { + start := charnum + for s.norm[charnum] == 0 { + charnum++ + } + for charnum >= start+24 { + start += 24 + bitStream += uint32(0xFFFF) << bitCount + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + } + for charnum >= start+3 { + start += 3 + bitStream += 3 << bitCount + bitCount += 2 + } + bitStream += uint32(charnum-start) << bitCount + bitCount += 2 + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + count := s.norm[charnum] + charnum++ + max := (2*threshold - 1) - remaining + if count < 0 { + remaining += count + } else { + remaining -= count + } + count++ // +1 for extra accuracy + if count >= threshold { + count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// bitCost returns the approximate symbol cost, as a fractional value, using a fixed-point format (accuracyLog fractional bits). +// note 1: assumes symbolValue is valid (<= maxSymbolValue). +// note 2: if freq[symbolValue]==0, returns a fake cost of tableLog+1 bits. +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns math.MaxUint32 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream.
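+// The state field indexes stateTable; init maps the first symbol to a starting +// state through its deltaNbBits/deltaFindState transform, and flush emits the +// final state using tableLog bits.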
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 00000000..474cb77d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
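+// Entries cover contiguous value ranges: entry i starts at the running base and +// spans 1<<bits[i] values, so a two-entry dst filled with fillBase(dst, 16, 1, 1) +// gets baselines 16 and 18, each with one extra bit.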
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 00000000..5d73c21e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of u, with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 00000000..09164856 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point.
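+ // Worked example: windowSize=8, len(h.b)=7, len(b)=3 gives discard=2; + // the two oldest history bytes are dropped before the 3 new bytes are copied in.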
+ discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 00000000..24b53065 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 00000000..777290d4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,71 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. 
+You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | + +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: + +``` +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) +- [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 00000000..fc40c820 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,230 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = primes[0] + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -primes[0] + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + memleft := d.mem[d.n&(len(d.mem)-1):] + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(memleft, b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + c := copy(memleft, b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[c:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. 
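+	// writeBlocks consumes as many whole 32-byte blocks as possible and returns + // the number of bytes processed; the remainder (0-31 bytes) is buffered in d.mem below.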
+ nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 00000000..ddb63aa9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,210 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + // Load fixed primes. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 + + // Load slice. + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end + + // The first loop limit will be len(b)-32. + SUBQ $32, end + + // Check whether we have at least one block. + CMPQ n, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). 
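+	// v1 = prime1 + prime2, v2 = prime2, v3 = 0, v4 = -prime1, matching the + // values set by Digest.Reset in the pure Go implementation.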
+ MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + + JMP afterBlocks + +noBlocks: + MOVQ ·primes+32(SB), h + +afterBlocks: + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end + JGE finalize + +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h + + CMPQ p, end + JL loop1 + +finalize: + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + // Load fixed primes needed for round. + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + + // Load slice. + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end + + // Load vN from d. + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. + blockLoop() + + // Copy vN back to d. + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 00000000..ae7d4d32 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,184 @@ +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
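+// Each iteration issues two post-indexed LDP pair loads to pull four 8-byte +// lanes and feeds one lane into each accumulator, mirroring the amd64 blockLoop above.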
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(s *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD s+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 00000000..d4221edf --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
+// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 00000000..0be16cef --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,76 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := primes[0] + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -primes[0] + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 + h = rol23(h)*prime2 + prime3 + b = b[4:] + } + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 00000000..6f3b0cb1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 00000000..f41932b7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
+ +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 00000000..0782b86e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,66 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SHRL $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 00000000..57b9c31c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 00000000..d7fe6d82 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,503 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. 
+	llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. + fse *fseDecoder + state fseState + repeat bool +} + +// init the state of the decoder with input from stream. +func (s *sequenceDec) init(br *bitReader) error { + if s.fse == nil { + return errors.New("sequence decoder not defined") + } + s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog]) + return nil +} + +// sequenceDecs contains all 3 decoders and their state. +type sequenceDecs struct { + litLengths sequenceDec + offsets sequenceDec + matchLengths sequenceDec + prevOffset [3]int + dict []byte + literals []byte + out []byte + nSeqs int + br *bitReader + seqSize int + windowSize int + maxBits uint8 + maxSyncLen uint64 +} + +// initialize all 3 decoders from the stream input. +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { + if err := s.litLengths.init(br); err != nil { + return errors.New("litLengths:" + err.Error()) + } + if err := s.offsets.init(br); err != nil { + return errors.New("offsets:" + err.Error()) + } + if err := s.matchLengths.init(br); err != nil { + return errors.New("matchLengths:" + err.Error()) + } + s.br = br + s.prevOffset = hist.recentOffsets + s.dict = nil + s.literals = hist.decoders.literals + s.out = out + s.windowSize = hist.windowSize + if hist.dict != nil { + s.dict = hist.dict.content + } + return nil +} + +func (s *sequenceDecs) freeDecoders() { + if f := s.litLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.litLengths.fse = nil + } + if f := s.offsets.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.offsets.fse = nil + } + if f := s.matchLengths.fse; f != nil && !f.preDefined { + fseDecoderPool.Put(f) + s.matchLengths.fse = nil + } +} + +// execute will execute the decoded sequences on the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + if len(s.dict) == 0 { + return s.executeSimple(seqs, hist) + } + + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history.
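+// decodeSync writes decoded literals and matches directly into s.out as the +// sequences are read; it first tries the specialized decodeSyncSimple path and +// only falls back to the generic loop below when that path reports the input +// as unsupported.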
+func (s *sequenceDecs) decodeSync(hist []byte) error { + supported, err := s.decodeSyncSimple(hist) + if supported { + return err + } + + br := s.br + seqs := s.nSeqs + startSize := len(s.out) + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) 
+ s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. + dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. 
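+	// Offset bits are therefore read first, then match length, then literal length.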
+ br.fill() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fill() + } + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 00000000..c59f17e0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,394 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + "io" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. 
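+//
+// A (false, nil) return means the assembler routines cannot handle this block
+// (for example when a dictionary is in use), and the caller must fall back to
+// the generic implementation.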
+func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. + const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorOverread: + return true, io.ErrUnexpectedEOF + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(s.out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
+		}
+	}
+
+	return true, nil
+}
+
+// --------------------------------------------------------------------------------
+
+type decodeAsmContext struct {
+	llTable   []decSymbol
+	mlTable   []decSymbol
+	ofTable   []decSymbol
+	llState   uint64
+	mlState   uint64
+	ofState   uint64
+	iteration int
+	seqs      []seqVals
+	litRemain int
+}
+
+const noError = 0
+
+// error reported when mo == 0 && ml > 0
+const errorMatchLenOfsMismatch = 1
+
+// error reported when ml > maxMatchLen
+const errorMatchLenTooBig = 2
+
+// error reported when mo > available history or mo > s.windowSize
+const errorMatchOffTooBig = 3
+
+// error reported when the sum of literal lengths exceeds the literal buffer size
+const errorNotEnoughLiterals = 4
+
+// error reported when capacity of `out` is too small
+const errorNotEnoughSpace = 5
+
+// error reported when bits are overread.
+const errorOverread = 6
+
+// sequenceDecs_decode_amd64 implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_56_amd64 implements the main loop of sequenceDecs in x86 asm.
+//
+// Please refer to seqdec_generic.go for the reference implementation.
+//
+//go:noescape
+func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// sequenceDecs_decode_56_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
+//go:noescape
+func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
+
+// decode sequences from the stream without the provided history.
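+//
+// Only the (ll, ml, mo) values are produced here, into seqs; applying them to
+// the output buffer is done separately by execute/executeSimple.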
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
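+		// The append above only grows the backing array; the reslice below
+		// restores the previous length, so only capacity has changed.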
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 00000000..f5591fa1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4151 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
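+	// Fast path: with at least 8 input bytes left, refill using one unaligned
+	// 8-byte load; otherwise fall back to the byte-by-byte refill loop below.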
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + 
MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 
152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRL $0x10, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRL $0x10, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRL $0x10, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + 
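+	// The offsetB <= 1 cases below mirror sequenceDecs.adjustOffset in seqdec.go.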
+sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + +sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + 
+sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
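+	// 8-16 bytes: two possibly overlapping 8-byte moves cover the whole span.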
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + MOVB -1(SI)(R11*1), R15 + MOVB 
R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 
+ JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + SUBQ 80(AX), SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + 
MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + 
ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 
32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal 
Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + 
+copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 
24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + 
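+ // 96(ctx) holds the number of sequences still to decode; when it is zero
+ // this is the last sequence, so the FSE state updates below are skipped,
+ // mirroring the "last sequence, don't update state" break in the generic
+ // Go decoder.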
JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRL $0x10, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRL $0x10, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRL $0x10, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + 
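+ // The _safe variants copy with exact lengths instead of the 16-byte
+ // MOVUPS granularity of the fast paths (which may store a few bytes past
+ // the requested end), so they remain correct when s.out has no spare
+ // capacity beyond the block being written.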
+copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + +copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 
+ JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ -8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
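+ // The bmi2 variants replace the variable shifts of the plain amd64 code
+ // with flagless bit-field instructions: BEXTRQ with control 0x0808
+ // extracts the 8-bit bit-count field of an FSE state, BZHIQ masks a value
+ // to that many low bits, and SHRXQ shifts by a count held in a register.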
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 
2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error 
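+ // (ret codes shared by these epilogues: 0 success, 1 zero offset with a
+ // nonzero match length, 2 match length above maxMatchLen (0x20002),
+ // 3 match offset beyond history/window, 4 not enough literals,
+ // 5 not enough output space, 6 bitstream overread.)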
+sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 00000000..2fb35b78 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
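+ // The mo++ below implements that shift; the code that follows maps the
+ // adjusted value onto s.prevOffset and rotates the three remembered
+ // offsets.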
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
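+ // The match is longer than the remaining history tail: take the final v
+ // history bytes here, then fall through so the rest is copied from the
+ // block currently being built.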
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 00000000..8014174a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
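+ // Lengths 0-63 map directly through llCodeTable; anything longer becomes
+ // highBit(length)+llDeltaCode, e.g. llCode(100) = 6 + 19 = 25, a code
+ // that llBitsTable says carries 6 extra bits.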
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 00000000..ec13594e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,434 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+ ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") + // ErrSnappyUnsupported reports that the input isn't supported. + ErrSnappyUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. +// Conversion is done by converting the stream directly from Snappy without intermediate +// full decoding. +// Therefore the compression ratio is much less than what can be done by a full decompression +// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without +// any errors being generated. +// No CRC value is being generated and not all CRC values of the Snappy stream are checked. +// However, it provides really fast recompression of Snappy streams. +// The converter can be reused to avoid allocations, even after errors. +type SnappyConverter struct { + r io.Reader + err error + buf []byte + block *blockEnc +} + +// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. +// If any error is detected on the Snappy stream it is returned. +// The number of bytes written is returned. +func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { + initPredefined() + r.err = nil + r.r = in + if r.block == nil { + r.block = &blockEnc{} + r.block.init() + } + r.block.initNewEncode() + if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { + r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) + } + r.block.litEnc.Reuse = huff0.ReusePolicyNone + var written int64 + var readHeader bool + { + header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + + var n int + n, r.err = w.Write(header) + if r.err != nil { + return written, r.err + } + written += int64(n) + } + + for { + if !r.readFull(r.buf[:4], true) { + // Add empty last block + r.block.reset(nil) + r.block.last = true + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, err := w.Write(r.block.output) + if err != nil { + return written, err + } + written += int64(n) + + return written, r.err + } + chunkType := r.buf[0] + if !readHeader { + if chunkType != chunkTypeStreamIdentifier { + println("chunkType != chunkTypeStreamIdentifier", chunkType) + r.err = ErrSnappyCorrupt + return written, r.err + } + readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + println("chunkLen > len(r.buf)", chunkType) + r.err = ErrSnappyUnsupported + return written, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
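+			// The chunk body is a 4-byte (masked CRC-32C) checksum of the
+			// uncompressed data followed by the Snappy-compressed payload;
+			// as noted on the SnappyConverter type, the checksum is skipped
+			// here rather than verified.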
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
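+			// The framing format requires decoders to report an error for
+			// these, so the conversion stops here.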
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 00000000..29c15c8c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 00000000..4be7cc73 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,121 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
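+	// (The Zstandard format reserves block type 3.)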
+ ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") + + // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. + // Typically this indicates wrong or corrupted input. + ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") + + // ErrBlockTooSmall is returned when a block is too small to be decoded. + // Typically returned on invalid input. + ErrBlockTooSmall = errors.New("block too small") + + // ErrUnexpectedBlockSize is returned when a block has unexpected size. + // Typically returned on invalid input. + ErrUnexpectedBlockSize = errors.New("unexpected block size") + + // ErrMagicMismatch is returned when a "magic" number isn't what is expected. + // Typically this indicates wrong or corrupted input. + ErrMagicMismatch = errors.New("invalid input: magic number mismatch") + + // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeExceeded = errors.New("window size exceeded") + + // ErrWindowSizeTooSmall is returned when no window size is specified. + // Typically this indicates wrong or corrupted input. + ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") + + // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. + ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") + + // ErrUnknownDictionary is returned if the dictionary ID is unknown. + ErrUnknownDictionary = errors.New("unknown dictionary") + + // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeExceeded = errors.New("frame size exceeded") + + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + + // ErrCRCMismatch is returned if CRC mismatches. + ErrCRCMismatch = errors.New("CRC check failed") + + // ErrDecoderClosed will be returned if the Decoder was used after + // Close has been called. + ErrDecoderClosed = errors.New("decoder used after Close") + + // ErrDecoderNilInput is returned when a nil Reader was provided + // and an operation other than Reset/DecodeAll/Close was attempted. + ErrDecoderNilInput = errors.New("nil input provided as reader") +) + +func println(a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Println(a...) + } +} + +func printf(format string, a ...interface{}) { + if debug || debugDecoder || debugEncoder { + log.Printf(format, a...) + } +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt index 2ef4714f..452d28ed 100644 --- a/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt +++ b/vendor/github.com/klauspost/cpuid/v2/CONTRIBUTING.txt @@ -1,35 +1,35 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2015- Klaus Post & Contributors. 
-Email: klauspost@gmail.com - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2015- Klaus Post & Contributors. +Email: klauspost@gmail.com + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md new file mode 100644 index 00000000..c7582349 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -0,0 +1,96 @@ +## 1.5.0 + +* New option `IgnoreUntaggedFields` to ignore decoding to any fields + without `mapstructure` (or the configured tag name) set [GH-277] +* New option `ErrorUnset` which makes it an error if any fields + in a target struct are not set by the decoding process. [GH-225] +* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] +* Decoding to slice from array no longer crashes [GH-265] +* Decode nested struct pointers to map [GH-271] +* Fix issue where `,squash` was ignored if `Squash` option was set. 
[GH-280] +* Fix issue where fields with `,omitempty` would sometimes decode + into a map with an empty string key [GH-281] + +## 1.4.3 + +* Fix cases where `json.Number` didn't decode properly [GH-261] + +## 1.4.2 + +* Custom name matchers to support any sort of casing, formatting, etc. for + field names. [GH-250] +* Fix possible panic in ComposeDecodeHookFunc [GH-251] + +## 1.4.1 + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + +## 1.3.3 + +* Decoding maps from maps creates a settable value for decode hooks [GH-203] + +## 1.3.2 + +* Decode into interface type with a struct value is supported [GH-187] + +## 1.3.1 + +* Squash should only squash embedded structs. [GH-194] + +## 1.3.0 + +* Added `",omitempty"` support. This will ignore zero values in the source + structure when encoding. [GH-145] + +## 1.2.3 + +* Fix duplicate entries in Keys list with pointer values. [GH-185] + +## 1.2.2 + +* Do not add unsettable (unexported) values to the unused metadata key + or "remain" value. [GH-150] + +## 1.2.1 + +* Go modules checksum mismatch fix + +## 1.2.0 + +* Added support to capture unused values in a field using the `",remain"` value + in the mapstructure tag. There is an example to showcase usage. +* Added `DecoderConfig` option to always squash embedded structs +* `json.Number` can decode into `uint` types +* Empty slices are preserved and not replaced with nil slices +* Fix panic that can occur in when decoding a map into a nil slice of structs +* Improved package documentation for godoc + +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 00000000..0018dc7d --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 00000000..3a754ca7 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,279 @@ +package mapstructure + +import ( + "encoding" + "errors" + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. 
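+	// The candidate signatures below are tried in order; the first one
+	// the supplied hook is convertible to is used.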
+ potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value) (interface{}, error) { + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, f1 := range fs { + data, err = DecodeHookExec(f1, newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. +func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, f := range ff { + out, err = DecodeHookExec(f, a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
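+//
+// A minimal usage sketch (the Config struct here is illustrative):
+//
+//	type Config struct{ Timeout time.Duration }
+//	var c Config
+//	d, _ := NewDecoder(&DecoderConfig{
+//		DecodeHook: StringToTimeDurationHookFunc(),
+//		Result:     &c,
+//	})
+//	_ = d.Decode(map[string]interface{}{"timeout": "1500ms"})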
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
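+//
+// For example, with this hook a bool decodes into a string field as "1"
+// or "0", and ints, uints, and floats decode as their base-10
+// representations.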
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { + return nil, err + } + return result, nil + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 00000000..47a99e5a --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 00000000..1efb22ac --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,1540 @@ +// Package mapstructure exposes functionality to convert one arbitrary +// Go type into another, typically to convert a map[string]interface{} +// into a native Go structure. 
+// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +// +// The simplest function to start with is Decode. +// +// Field Tags +// +// When decoding to a struct, mapstructure will use the field name by +// default to perform the mapping. For example, if a struct has a field +// "Username" then mapstructure will look for a key in the source value +// of "username" (case insensitive). +// +// type User struct { +// Username string +// } +// +// You can change the behavior of mapstructure by using struct tags. +// The default struct tag that mapstructure looks for is "mapstructure" +// but you can customize it using DecoderConfig. +// +// Renaming Fields +// +// To rename the key that mapstructure looks for, use the "mapstructure" +// tag and set a value directly. For example, to change the "username" example +// above to "user": +// +// type User struct { +// Username string `mapstructure:"user"` +// } +// +// Embedded Structs and Squashing +// +// Embedded structs are treated as if they're another field with that name. +// By default, the two structs below are equivalent when decoding with +// mapstructure: +// +// type Person struct { +// Name string +// } +// +// type Friend struct { +// Person +// } +// +// type Friend struct { +// Person Person +// } +// +// This would require an input that looks like below: +// +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } +// +// If your "person" value is NOT nested, then you can append ",squash" to +// your tag value and mapstructure will treat it as if the embedded struct +// were part of the struct directly. Example: +// +// type Friend struct { +// Person `mapstructure:",squash"` +// } +// +// Now the following input would be accepted: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// +// DecoderConfig has a field that changes the behavior of mapstructure +// to always squash embedded structs. +// +// Remainder Values +// +// If there are any unmapped keys in the source value, mapstructure by +// default will silently ignore them. You can error by setting ErrorUnused +// in DecoderConfig. If you're using Metadata you can also maintain a slice +// of the unused keys. +// +// You can also use the ",remain" suffix on your tag to collect all unused +// values in a map. The field with this tag MUST be a map type and should +// probably be a "map[string]interface{}" or "map[interface{}]interface{}". +// See example below: +// +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } +// +// Given the input below, Other would be populated with the other +// values that weren't used (everything but "name"): +// +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } +// +// Omit Empty Values +// +// When decoding from a struct to any other value, you may use the +// ",omitempty" suffix on your tag to omit that value if it equates to +// the zero value. The zero value of all types is specified in the Go +// specification. 
+// +// For example, the zero type of a numeric type is zero ("0"). If the struct +// field value is zero and a numeric type, the field is empty, and it won't +// be encoded into the destination type. +// +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } +// +// Unexported fields +// +// Since unexported (private) struct fields cannot be set outside the package +// where they are defined, the decoder will simply skip them. +// +// For this output type definition: +// +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } +// +// Using this map as input: +// +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } +// +// The following struct will be decoded: +// +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } +// +// Other Configuration +// +// mapstructure is highly configurable. See the DecoderConfig struct +// for other features and options that are supported. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. + // + // If an error is returned, the entire decode will fail with that error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). 
+ ErrorUnused bool + + // If ErrorUnset is true, then it is an error for there to exist + // fields in the result that were not set in the decoding process + // (extra fields). This only applies to decoding to a struct. This + // will affect all nested structs as well. + ErrorUnset bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Squash will squash embedded structs. A squash tag may also be + // added to an individual struct field using a tag. For example: + // + // type Parent struct { + // Child `mapstructure:",squash"` + // } + Squash bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string + + // IgnoreUntaggedFields ignores all struct fields without explicit + // TagName, comparable to `mapstructure:"-"` as default behaviour. + IgnoreUntaggedFields bool + + // MatchName is the function used to match the map key to the struct + // field name or tag. Defaults to `strings.EqualFold`. This can be used + // to implement case-sensitive tag values, support snake casing, etc. + MatchName func(mapKey, fieldName string) bool +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string + + // Unset is a slice of field names that were found in the result interface + // but weren't set in the decoding process since there was no matching value + // in the input + Unset []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. 
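+//
+// A minimal sketch:
+//
+//	type Person struct{ Name string }
+//	var p Person
+//	err := Decode(map[string]interface{}{"name": "alice"}, &p)
+//	// on success, p.Name == "alice"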
+func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. +func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + + if config.Metadata.Unset == nil { + config.Metadata.Unset = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + if config.MatchName == nil { + config.MatchName = strings.EqualFold + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. 
+ if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. + var err error + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + outputKind := getKind(outVal) + addMetaKey := true + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + addMetaKey, err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if addMetaKey && d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + elem := val.Elem() + + // If we can't address this element, then its not writable. Instead, + // we make a copy of the value (which is a pointer and therefore + // writable), decode into that, and replace the whole value. + copied := false + if !elem.CanAddr() { + copied = true + + // Make *T + copy := reflect.New(elem.Type()) + + // *T = elem + copy.Elem().Set(elem) + + // Set elem so we decode into it + elem = copy + } + + // Decode. If we have an error then return. We also return right + // away if we're not a copy because that means we decoded directly. + if err := d.decode(name, data, elem); err != nil || !copied { + return err + } + + // If we're a copy, we need to set te final result + val.Set(elem.Elem()) + return nil + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. 
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot 
parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := strconv.ParseUint(string(jn), 0, 64) + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetUint(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error 
{ + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + name+"["+strconv.Itoa(i)+"]", + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := name + "[" + k.String() + "]" + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. 
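For orientation, the `WeaklyTypedInput` branches above are what let string-encoded scalars land in typed fields. A minimal sketch of the consumer side, assuming the upstream `mitchellh/mapstructure` import path and API (`DecoderConfig`, `NewDecoder`), which this vendored copy mirrors; the example is an illustration, not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Server struct {
	Port  int  `mapstructure:"port"`
	Debug bool `mapstructure:"debug"`
}

func main() {
	// String-encoded scalars, as often produced by env files or YAML.
	input := map[string]interface{}{"port": "8080", "debug": "1"}

	var out Server
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true, // enables the string -> int/bool branches above
		Result:           &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Port:8080 Debug:true}
}
```

Without `WeaklyTypedInput`, both fields would fail with the "unconvertible type" errors returned by the decode functions above.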
+ v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + keyName := f.Name + + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } + + // If Squash is set in the config, we squash the field down. + squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + + v = dereferencePtrToStructIfNeeded(v, d.config.TagName) + + // Determine the name of the key in the map + if index := strings.Index(tagValue, ","); index != -1 { + if tagValue[:index] == "-" { + continue + } + // If "omitempty" is specified in the tag, it ignores empty values. + if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { + continue + } + + // If "squash" is specified in the tag, we squash the field down. + squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + } + if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { + keyName = keyNameTagValue + } + } else if len(tagValue) > 0 { + if tagValue == "-" { + continue + } + keyName = tagValue + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(vMap.Type()) + reflect.Indirect(addrVal).Set(vMap) + + err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) + if err != nil { + return err + } + + // the underlying map may have been completely overwritten so pull + // it indirectly out of the enclosing value. + vMap = reflect.Indirect(addrVal) + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return true, nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. 
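The pointer path handled here allocates the element type on demand and sets the pointer to nil for nil input. A small sketch of that behavior, again assuming the upstream `mitchellh/mapstructure` API rather than this vendored copy:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Opts struct {
	Retries *int `mapstructure:"retries"`
}

func main() {
	var a, b Opts

	// A present value takes the reflect.New(valElemType) path:
	// the pointer is allocated and the element is decoded into.
	if err := mapstructure.Decode(map[string]interface{}{"retries": 3}, &a); err != nil {
		panic(err)
	}
	fmt.Println(*a.Retries) // 3

	// A nil value takes the isNil branch and leaves the pointer nil.
	if err := mapstructure.Decode(map[string]interface{}{"retries": nil}, &b); err != nil {
		panic(err)
	}
	fmt.Println(b.Retries == nil) // true
}
```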
+ valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return false, err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return false, err + } + } + return false, nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + // If we have a non array/slice type then we first attempt to convert. + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + + // If the input value is nil, then don't allocate since empty != nil + if dataValKind != reflect.Array && dataVal.IsNil() { + return nil + } + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Make a new slice to hold our result, same size as the original data. 
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := name + "[" + strconv.Itoa(i) + "]" + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. 
+ + // Make a new map to hold our result + mapType := reflect.TypeOf((map[string]interface{})(nil)) + mval := reflect.MakeMap(mapType) + + // Creating a pointer to a map so that other methods can completely + // overwrite the map if need be (looking at you decodeMapFromMap). The + // indirection allows the underlying map to be settable (CanSet() == true) + // where as reflect.MakeMap returns an unsettable map. + addrVal := reflect.New(mval.Type()) + + reflect.Indirect(addrVal).Set(mval) + if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + targetValKeysUnused := make(map[interface{}]struct{}) + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + + // remainField is set to a valid field set with the "remain" tag if + // we are keeping track of remaining values. + var remainField *field + + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } + + // If "squash" is specified in the tag, we squash the field down. 
+ squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous + remain := false + + // We always parse the tags cause we're looking for other tags too + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + + if tag == "remain" { + remain = true + break + } + } + + if squash { + if fieldVal.Kind() != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) + } else { + structs = append(structs, fieldVal) + } + continue + } + + // Build our field + if remain { + remainField = &field{fieldType, fieldVal} + } else { + // Normal struct field, store it away + fields = append(fields, field{fieldType, fieldVal}) + } + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if d.config.MatchName(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Remember it for potential errors and metadata. + targetValKeysUnused[fieldName] = struct{}{} + continue + } + } + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = name + "." + fieldName + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + // If we have a "remain"-tagged field and we have unused keys then + // we put the unused keys directly into the remain field. + if remainField != nil && len(dataValKeysUnused) > 0 { + // Build a map of only the unused values + remain := map[interface{}]interface{}{} + for key := range dataValKeysUnused { + remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() + } + + // Decode it as-if we were just decoding this map onto our map. 
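The `squash` and `remain` tag handling above is easiest to see with a small example; the sketch below assumes the upstream `mitchellh/mapstructure` API:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Base struct {
	ID string `mapstructure:"id"`
}

type Config struct {
	Base `mapstructure:",squash"` // Base's fields decode as if declared inline.

	Name string `mapstructure:"name"`

	// Keys with no matching field are collected here instead of
	// being reported as unused.
	Rest map[string]interface{} `mapstructure:",remain"`
}

func main() {
	input := map[string]interface{}{"id": "abc", "name": "demo", "extra": 42}

	var c Config
	if err := mapstructure.Decode(input, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Base:{ID:abc} Name:demo Rest:map[extra:42]}
}
```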
+ if err := d.decodeMap(name, remain, remainField.val); err != nil { + errors = appendErrors(errors, err) + } + + // Set the map to nil so we have none so that the next check will + // not error (ErrorUnused) + dataValKeysUnused = nil + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { + keys := make([]string, 0, len(targetValKeysUnused)) + for rawKey := range targetValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + for rawKey := range targetValKeysUnused { + key := rawKey.(string) + if name != "" { + key = name + "." + key + } + + d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) + } + } + + return nil +} + +func isEmptyValue(v reflect.Value) bool { + switch getKind(v) { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} + +func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { + for i := 0; i < typ.NumField(); i++ { + f := typ.Field(i) + if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields + return true + } + if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside + return true + } + } + return false +} + +func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return v + } + deref := v.Elem() + derefT := deref.Type() + if isStructTypeConvertibleToMap(derefT, true, tagName) { + return deref + } + return v +} diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE new file mode 100644 index 00000000..bbc7b897 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/LICENSE @@ -0,0 +1,31 @@ +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile new file mode 100644 index 00000000..e33ee173 --- /dev/null +++ b/vendor/github.com/munnerz/goautoneg/Makefile @@ -0,0 +1,13 @@ +include $(GOROOT)/src/Make.inc + +TARG=bitbucket.org/ww/goautoneg +GOFILES=autoneg.go + +include $(GOROOT)/src/Make.pkg + +format: + gofmt -w *.go + +docs: + gomake clean + godoc ${TARG} > README.txt diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt similarity index 100% rename from vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt rename to vendor/github.com/munnerz/goautoneg/README.txt diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go similarity index 52% rename from vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go rename to vendor/github.com/munnerz/goautoneg/autoneg.go index a21b9d15..1dd1cad6 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/munnerz/goautoneg/autoneg.go @@ -1,28 +1,28 @@ /* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -36,6 +36,7 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + package goautoneg import ( @@ -51,16 +52,14 @@ type Accept struct { Params map[string]string } -// For internal use, so that we can use the sort interface -type accept_slice []Accept +// acceptSlice is defined to implement sort interface. +type acceptSlice []Accept -func (accept accept_slice) Len() int { - slice := []Accept(accept) +func (slice acceptSlice) Len() int { return len(slice) } -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) +func (slice acceptSlice) Less(i, j int) bool { ai, aj := slice[i], slice[j] if ai.Q > aj.Q { return true @@ -74,63 +73,93 @@ func (accept accept_slice) Less(i, j int) bool { return false } -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) +func (slice acceptSlice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +func nextSplitElement(s, sep string) (item string, remaining string) { + if index := strings.Index(s, sep); index != -1 { + return s[:index], s[index+1:] + } + return s, "" +} + // Parse an Accept Header string returning a sorted list // of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") +func ParseAccept(header string) acceptSlice { + partsCount := 0 + remaining := header + for len(remaining) > 0 { + partsCount++ + _, remaining = nextSplitElement(remaining, ",") + } + accept := make(acceptSlice, 0, partsCount) - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 + remaining = header + var part string + for len(remaining) > 0 { + part, remaining = nextSplitElement(remaining, ",") + part = strings.TrimFunc(part, stringTrimSpaceCutset) - mrp := strings.Split(part, ";") + a := Accept{ + Q: 1.0, + } + + sp, remainingPart := nextSplitElement(part, ";") - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") + sp0, spRemaining := nextSplitElement(sp, "/") + a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") + case len(spRemaining) == 0: + if a.Type == "*" { + a.SubType = "*" + } else { + continue + } default: - 
continue + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "/") + if len(spRemaining) > 0 { + continue + } + a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) } - if len(mrp) == 1 { + if len(remainingPart) == 0 { accept = append(accept, a) continue } - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { + a.Params = make(map[string]string) + for len(remainingPart) > 0 { + sp, remainingPart = nextSplitElement(remainingPart, ";") + sp0, spRemaining = nextSplitElement(sp, "=") + if len(spRemaining) == 0 { + continue + } + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "=") + if len(spRemaining) != 0 { continue } - token := strings.Trim(sp[0], " ") + token := strings.TrimFunc(sp0, stringTrimSpaceCutset) if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) + a.Q, _ = strconv.ParseFloat(sp1, 32) } else { - a.Params[token] = strings.Trim(sp[1], " ") + a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) } } accept = append(accept, a) } - slice := accept_slice(accept) - sort.Sort(slice) - - return + sort.Sort(accept) + return accept } // Negotiate the most appropriate content_type given the accept header diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30..b9cc55ab 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 00000000..65d761bc --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
new file mode 100644
index 00000000..8547c8df
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+	"net/http"
+	"strings"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// AcceptSpec describes an Accept* header.
+type AcceptSpec struct {
+	Value string
+	Q     float64
+}
+
+// ParseAccept parses Accept* headers.
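The `ParseAccept`/`NegotiateContentEncoding` pair vendored in this hunk is what lets `promhttp` honor a scraper's `Accept-Encoding` header; the package itself sits under `internal/` and is not importable by user code. A minimal sketch of the public-facing effect, using the ordinary `promhttp` entry point:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// promhttp negotiates the response encoding (e.g. gzip) against the
	// scrape request's Accept-Encoding header via the vendored helper,
	// so no extra configuration is needed here.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```

The vendored implementation follows.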
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 00000000..2e45780b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5..520cbd7d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. 
Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. 
Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. 
Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64..51174641 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. 
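With `GoCollectorDefaultRuntimeMetrics` now enabled by default (see the rule change in this hunk), a registry picks up a recommended subset of `runtime/metrics` out of the box, and additional metrics can still be opted in by rule. A sketch using the public `collectors` wrapper, which forwards to the internal options shown here:

```go
package main

import (
	"regexp"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()

	// Opt in to one extra runtime/metrics entry on top of the defaults.
	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollectorRuntimeMetrics(collectors.GoRuntimeMetricsRule{
			Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`),
		}),
	))
}
```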
 			continue
 		}
 
+		help := attachOriginalName(d.Description.Description, d.Name)
 		sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
 		sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
 
@@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
 			m = newBatchHistogram(
 				NewDesc(
 					BuildFQName(namespace, subsystem, name),
-					d.Description.Description,
+					help,
 					nil,
 					nil,
 				),
@@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
 				Namespace: namespace,
 				Subsystem: subsystem,
 				Name:      name,
-				Help:      d.Description.Description,
+				Help:      help,
 			},
 			)
 		} else {
@@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
 				Namespace: namespace,
 				Subsystem: subsystem,
 				Name:      name,
-				Help:      d.Description.Description,
+				Help:      help,
 			})
 		}
 		metricSet = append(metricSet, m)
@@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
 	}
 }
 
+func attachOriginalName(desc, origName string) string {
+	return fmt.Sprintf("%s Sourced from %s", desc, origName)
+}
+
 // Describe returns all descriptions of the collector.
 func (c *goCollector) Describe(ch chan<- *Desc) {
 	c.base.Describe(ch)
@@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 {
 		//
 		// This should never happen because we always populate our metric
 		// set from the runtime/metrics package.
-		panic("unexpected unsupported metric")
+		panic("unexpected bad kind metric")
 	default:
 		// Unsupported metric kind.
 		//
 		// This should never happen because we check for this during initialization
 		// and flag and filter metrics whose kinds we don't understand.
-		panic("unexpected unsupported metric kind")
+		panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
 	}
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index b5c8bcb3..519db348 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -440,7 +440,7 @@ type HistogramOpts struct {
 	// constant (or any negative float value).
 	NativeHistogramZeroThreshold float64
 
-	// The remaining fields define a strategy to limit the number of
+	// The next three fields define a strategy to limit the number of
 	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
 	// at zero, the number of buckets is not limited. (Note that this might
 	// lead to unbounded memory consumption if the values observed by the
@@ -473,6 +473,22 @@ type HistogramOpts struct {
 	NativeHistogramMinResetDuration time.Duration
 	NativeHistogramMaxZeroThreshold float64
 
+	// NativeHistogramMaxExemplars limits the number of exemplars
+	// that are kept in memory for each native histogram. If you leave it at
+	// zero, a default value of 10 is used. If no exemplars should be kept specifically
+	// for native histograms, set it to a negative value. (Scrapers can
+	// still use the exemplars exposed for classic buckets, which are managed
+	// independently.)
+	NativeHistogramMaxExemplars int
+	// NativeHistogramExemplarTTL is only checked once
+	// NativeHistogramMaxExemplars is exceeded. In that case, the
+	// oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
+	// Otherwise, the older exemplar in the pair of exemplars that are closest
+	// together (on an exponential scale) is removed.
+	// If NativeHistogramExemplarTTL is left at its zero value, a default value of
+	// 5m is used. To always delete the oldest exemplar, set it to a negative value.
+	NativeHistogramExemplarTTL time.Duration
+
 	// now is for testing purposes, by default it's time.Now.
 	now func() time.Time
 
@@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 	if opts.afterFunc == nil {
 		opts.afterFunc = time.AfterFunc
 	}
+
 	h := &histogram{
 		desc:        desc,
 		upperBounds: opts.Buckets,
@@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 			h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
 		} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
 		h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+		h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
 	}
 	for i, upperBound := range h.upperBounds {
 		if i < len(h.upperBounds)-1 {
@@ -725,7 +743,8 @@ type histogram struct {
 	// resetScheduled is protected by mtx. It is true if a reset is
 	// scheduled for a later time (when nativeHistogramMinResetDuration has
 	// passed).
-	resetScheduled bool
+	resetScheduled  bool
+	nativeExemplars nativeExemplars
 
 	// now is for testing purposes, by default it's time.Now.
 	now func() time.Time
@@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) {
 	h.observe(v, h.findBucket(v))
 }
 
+// ObserveWithExemplar should not be called in a high-frequency setting
+// for a native histogram with configured exemplars. For this case,
+// the implementation isn't lock-free and might suffer from lock contention.
 func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
 	i := h.findBucket(v)
 	h.observe(v, i)
@@ -821,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error {
 				Length: proto.Uint32(0),
 			}}
 		}
+
+		if h.nativeExemplars.isEnabled() {
+			h.nativeExemplars.Lock()
+			his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+			h.nativeExemplars.Unlock()
+		}
+
 	}
 	addAndResetCounts(hotCounts, coldCounts)
 	return nil
@@ -1091,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) {
 	deleteSyncMap(&counts.nativeHistogramBucketsPositive)
}

-// updateExemplar replaces the exemplar for the provided bucket. With empty
-// labels, it's a no-op. It panics if any of the labels is invalid.
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With empty labels, it's a no-op. It panics if any of the labels is invalid.
+// If the histogram is a native histogram, the exemplar is also cached in
+// nativeExemplars, which has a limit and removes one exemplar when the limit is reached.
 func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
 	if l == nil {
 		return
@@ -1102,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
 		panic(err)
 	}
 	h.exemplars[bucket].Store(e)
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+	if doSparse {
+		h.nativeExemplars.addExemplar(e)
+	}
 }
 
 // HistogramVec is a Collector that bundles a set of Histograms that all share the
@@ -1336,6 +1371,48 @@ func MustNewConstHistogram(
 	return m
 }
 
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	ct time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+		createdTs:  timestamppb.New(ct),
+	}, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	ct time.Time,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
 type buckSort []*dto.Bucket
 
 func (s buckSort) Len() int {
@@ -1575,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) {
 	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
 	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
 }
+
+type nativeExemplars struct {
+	sync.Mutex
+
+	// Time-to-live for exemplars; it is set to -1 if exemplars are disabled,
+	// that is, if NativeHistogramMaxExemplars is below 0.
+	// The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
+	ttl time.Duration
+
+	exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+	return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+	if ttl == 0 {
+		ttl = 5 * time.Minute
+	}
+
+	if maxCount == 0 {
+		maxCount = 10
+	}
+
+	if maxCount < 0 {
+		maxCount = 0
+		ttl = -1
+	}
+
+	return nativeExemplars{
+		ttl:       ttl,
+		exemplars: make([]*dto.Exemplar, 0, maxCount),
+	}
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+	if !n.isEnabled() {
+		return
+	}
+
+	n.Lock()
+	defer n.Unlock()
+
+	// While the number of exemplars is still below cap(n.exemplars),
+	// insert the new exemplar directly.
+	if len(n.exemplars) < cap(n.exemplars) {
+		var nIdx int
+		for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+			if *e.Value < *n.exemplars[nIdx].Value {
+				break
+			}
+		}
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+		return
+	}
+
+	if len(n.exemplars) == 1 {
+		// When the number of exemplars is 1, then
+		// replace the existing exemplar with the new exemplar.
+		n.exemplars[0] = e
+		return
+	}
+	// From this point on, the number of exemplars is greater than 1.
+
+	// When the number of exemplars exceeds the limit, remove one exemplar.
+	var (
+		ot    = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+		otIdx = -1          // Index of the exemplar with the oldest timestamp.
+
+		md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+
+		// The insertion point of the new exemplar in the exemplars slice after insertion.
+		// This is calculated purely based on the order of the exemplars by value.
+		// nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+		nIdx = -1
+
+		// rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+		// The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+		// It is calculated in 3 steps:
+		//   1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+		//      That is the following will be true (on log scale):
+		//      either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+		//      the closest values to each other from all pairs.
+		//      For example, suppose the values are distributed like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                    ^--rIdx as this is older.
+		//      Or like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                         ^--rIdx as this is older.
+		//   2. If there is an exemplar that expired, then we simply reset rIdx to that index.
+		//   3. We check if by inserting the new exemplar we would create a closer pair at
+		//      (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+		//      keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+		rIdx = -1
+		cLog float64 // Logarithm of the current exemplar.
+		pLog float64 // Logarithm of the previous exemplar.
+	)
+
+	for i, exemplar := range n.exemplars {
+		// Find the exemplar with the oldest timestamp.
+		if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+			ot = exemplar.Timestamp.AsTime()
+			otIdx = i
+		}
+
+		// Find the index at which to insert the new exemplar.
+		if nIdx == -1 && *e.Value <= *exemplar.Value {
+			nIdx = i
+		}
+
+		// Find the two closest exemplars and pick the one with the older timestamp.
+		pLog = cLog
+		cLog = math.Log(exemplar.GetValue())
+		if i == 0 {
+			continue
+		}
+		diff := math.Abs(cLog - pLog)
+		if md == -1 || diff < md {
+			// The closest exemplar pair is at index: i-1, i.
+			// Choose the exemplar with the older timestamp for replacement.
+			md = diff
+			if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+				rIdx = i
+			} else {
+				rIdx = i - 1
+			}
+		}
+
+	}
+
+	// If all existing exemplars are smaller than the new exemplar,
+	// then the exemplar should be inserted at the end.
+	if nIdx == -1 {
+		nIdx = len(n.exemplars)
+	}
+	// Here, we have the following relationships:
+	// n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+	// e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+	if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+		// If the oldest exemplar has expired, then replace it with the new exemplar.
+		rIdx = otIdx
+	} else {
+		// In the previous for loop, when calculating the closest pair of exemplars,
+		// we did not take into account the newly inserted exemplar.
+		// So we need to calculate with the newly inserted exemplar again.
+		elog := math.Log(e.GetValue())
+		if nIdx > 0 {
+			diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+			if diff < md {
+				// The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+				//                      v--rIdx
+				// |-----------x-n-----------x----------------x----x-----|
+				//     nIdx-1--^ ^--new exemplar value
+				// Do not make the spread worse, replace nIdx-1 and not rIdx.
+				md = diff
+				rIdx = nIdx - 1
+			}
+		}
+		if nIdx < len(n.exemplars) {
+			diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+			if diff < md {
+				// The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+				//                      v--rIdx
+				// |-----------x-----------n-x----------------x----x-----|
+				//     new exemplar value--^ ^--nIdx
+				// Do not make the spread worse, replace nIdx and not rIdx.
+				rIdx = nIdx
+			}
+		}
+	}
+
+	// Adjust the slice according to rIdx and nIdx.
+	switch {
+	case rIdx == nIdx:
+		n.exemplars[nIdx] = e
+	case rIdx < nIdx:
+		n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+	case rIdx > nIdx:
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
index 723b45d6..a4fa6eab 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -30,3 +30,5 @@ type GoCollectorOptions struct {
 	RuntimeMetricSumForHist map[string]string
 	RuntimeMetricRules      []GoCollectorRule
 }
+
+var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index f018e572..9d9b81ab 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
 	)
 	for i, e := range exemplars {
 		ts := e.Timestamp
-		if ts == (time.Time{}) {
+		if ts.IsZero() {
 			ts = now
 		}
 		exs[i], err = newExemplar(e.Value, ts, e.Labels)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 8548dd18..62a4e7ad 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -22,14 +22,15 @@ import (
 )
 
 type processCollector struct {
-	collectFn       func(chan<- Metric)
-	pidFn           func() (int, error)
-	reportErrors    bool
-	cpuTotal        *Desc
-	openFDs, maxFDs *Desc
-	vsize, maxVsize *Desc
-	rss             *Desc
-	startTime       *Desc
+	collectFn         func(chan<- Metric)
+	pidFn             func() (int, error)
+	reportErrors      bool
+	cpuTotal          *Desc
+	openFDs, maxFDs   *Desc
+	vsize, maxVsize   *Desc
+	rss               *Desc
+	startTime         *Desc
+	inBytes, outBytes *Desc
 }
 
 // ProcessCollectorOpts defines the behavior of a process metrics collector
@@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
 			"Start time of the process since unix epoch in seconds.",
 			nil, nil,
 		),
+		inBytes: NewDesc(
+			ns+"process_network_receive_bytes_total",
+			"Number of bytes received by the process over the network.",
+			nil, nil,
+		),
+		outBytes: NewDesc(
+			ns+"process_network_transmit_bytes_total",
+			"Number of bytes sent by the process over the network.",
+			nil, nil,
+		),
 	}
 
 	if opts.PidFn == nil {
@@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
 	ch <- c.maxVsize
 	ch <- c.rss
 	ch <- c.startTime
+	ch <- c.inBytes
+	ch <- c.outBytes
 }
 
 // Collect returns the current state of all metrics of the collector.
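For context, here is a minimal sketch (not part of the vendored diff) of how downstream code could opt in to the native-histogram exemplar support added in histogram.go above. The metric name, limits, and `trace_id` label are illustrative assumptions:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A native histogram (bucket factor > 1 enables it) that also keeps up to
	// 16 exemplars in memory, preferring to evict exemplars older than 2m.
	reqDuration := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                        "request_duration_seconds", // hypothetical metric
		Help:                        "Request duration in seconds.",
		NativeHistogramBucketFactor: 1.1,
		NativeHistogramMaxExemplars: 16, // 0 means the default of 10; negative disables
		NativeHistogramExemplarTTL:  2 * time.Minute,
	})
	prometheus.MustRegister(reqDuration)

	// ObserveWithExemplar feeds nativeExemplars.addExemplar (see the diff above);
	// per the new doc comment, it should be avoided on very hot code paths.
	reqDuration.(prometheus.ExemplarObserver).ObserveWithExemplar(
		0.042, prometheus.Labels{"trace_id": "abc123"}, // hypothetical label
	)
}

The eviction logic in addExemplar then keeps the stored exemplars spread out by value, replacing either the expired oldest exemplar or one half of the closest pair.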
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index 8c1136ce..14d56d2d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
 	} else {
 		c.reportError(ch, nil, err)
 	}
+
+	if netstat, err := p.Netstat(); err == nil {
+		var inOctets, outOctets float64
+		if netstat.IpExt.InOctets != nil {
+			inOctets = *netstat.IpExt.InOctets
+		}
+		if netstat.IpExt.OutOctets != nil {
+			outOctets = *netstat.IpExt.OutOctets
+		}
+		ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+		ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+	} else {
+		c.reportError(ch, nil, err)
+	}
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index 9819917b..315eab5f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
 	return n, err
 }
 
+// Unwrap lets http.ResponseController get the underlying http.ResponseWriter,
+// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
+func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
+	return r.ResponseWriter
+}
+
 type (
 	closeNotifierDelegator struct{ *responseWriterDelegator }
 	flusherDelegator       struct{ *responseWriterDelegator }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index 09b8d2fb..e598e66e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -38,12 +38,13 @@ import (
 	"io"
 	"net/http"
 	"strconv"
-	"strings"
 	"sync"
 	"time"
 
+	"github.com/klauspost/compress/zstd"
 	"github.com/prometheus/common/expfmt"
 
+	"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
 	"github.com/prometheus/client_golang/prometheus"
 )
 
@@ -54,6 +55,18 @@ const (
 	processStartTimeHeader = "Process-Start-Time-Unix"
 )
 
+// Compression represents the content encodings handlers support for the HTTP
+// responses.
+type Compression string
+
+const (
+	Identity Compression = "identity"
+	Gzip     Compression = "gzip"
+	Zstd     Compression = "zstd"
+)
+
+var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd}
+
 var gzipPool = sync.Pool{
 	New: func() interface{} {
 		return gzip.NewWriter(nil)
@@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
 		}
 	}
 
+	// Select compression formats to offer based on default or user choice.
+	var compressions []string
+	if !opts.DisableCompression {
+		offers := defaultCompressionFormats
+		if len(opts.OfferedCompressions) > 0 {
+			offers = opts.OfferedCompressions
+		}
+		for _, comp := range offers {
+			compressions = append(compressions, string(comp))
+		}
+	}
+
 	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
 		if !opts.ProcessStartTime.IsZero() {
 			rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
@@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
 		} else {
 			contentType = expfmt.Negotiate(req.Header)
 		}
-		header := rsp.Header()
-		header.Set(contentTypeHeader, string(contentType))
+		rsp.Header().Set(contentTypeHeader, string(contentType))
 
-		w := io.Writer(rsp)
-		if !opts.DisableCompression && gzipAccepted(req.Header) {
-			header.Set(contentEncodingHeader, "gzip")
-			gz := gzipPool.Get().(*gzip.Writer)
-			defer gzipPool.Put(gz)
+		w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error getting writer", err)
+			}
+			w = io.Writer(rsp)
+			encodingHeader = string(Identity)
+		}
 
-			gz.Reset(w)
-			defer gz.Close()
+		defer closeWriter()
 
-			w = gz
+		// Set Content-Encoding only when data is compressed
+		if encodingHeader != string(Identity) {
+			rsp.Header().Set(contentEncodingHeader, encodingHeader)
 		}
-
 		enc := expfmt.NewEncoder(w, contentType)
 
 		// handleError handles the error according to opts.ErrorHandling
@@ -343,9 +370,19 @@ type HandlerOpts struct {
 	// no effect on the HTTP status code because ErrorHandling is set to
 	// ContinueOnError.
 	Registry prometheus.Registerer
-	// If DisableCompression is true, the handler will never compress the
-	// response, even if requested by the client.
+	// DisableCompression disables the response encoding (compression) and
+	// encoding negotiation. If true, the handler will
+	// never compress the response, even if requested
+	// by the client and the OfferedCompressions field is set.
 	DisableCompression bool
+	// OfferedCompressions is a set of encodings (compressions) the handler
+	// will try to offer when negotiating with the client. This defaults to
+	// identity, gzip and zstd.
+	// NOTE: If the handler can't agree with the client on the encodings, or
+	// unsupported or empty encodings are set in OfferedCompressions, the
+	// handler always falls back to no compression (identity), for
+	// compatibility reasons. In such cases ErrorLog will be used if set.
+	OfferedCompressions []Compression
 	// The number of concurrent HTTP requests is limited to
 	// MaxRequestsInFlight. Additional requests are responded to with 503
 	// Service Unavailable and a suitable message in the body. If
@@ -381,19 +418,6 @@ type HandlerOpts struct {
 	ProcessStartTime time.Time
 }
 
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
-	a := header.Get(acceptEncodingHeader)
-	parts := strings.Split(a, ",")
-	for _, part := range parts {
-		part = strings.TrimSpace(part)
-		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
-			return true
-		}
-	}
-	return false
-}
-
 // httpError removes any content-encoding header and then calls http.Error with
 // the provided error and http.StatusInternalServerError. Error contents is
 // supposed to be uncompressed plain text. Same as with a plain http.Error, this
@@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) {
 		http.StatusInternalServerError,
 	)
 }
+
+// negotiateEncodingWriter reads the Accept-Encoding header from a request and
+// selects the right compression based on an allow-list of supported
+// compressions. It returns a writer implementing the compression and the
+// correct value that the caller can set in the response header.
+func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
+	if len(compressions) == 0 {
+		return rw, string(Identity), func() {}, nil
+	}
+
+	// TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
+	selected := httputil.NegotiateContentEncoding(r, compressions)
+
+	switch selected {
+	case "zstd":
+		// TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented.
+		z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
+		if err != nil {
+			return nil, "", func() {}, err
+		}
+
+		z.Reset(rw)
+		return z, selected, func() { _ = z.Close() }, nil
+	case "gzip":
+		gz := gzipPool.Get().(*gzip.Writer)
+		gz.Reset(rw)
+		return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
+	case "identity":
+		// This means the content is not compressed.
+		return rw, selected, func() {}, nil
+	default:
+		// The content encoding was not implemented yet.
+		return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 5e2ced25..c6fd2f58 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error {
 			if dimHash != desc.dimHash {
 				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
 			}
-		} else {
-			// ...then check the new descriptors already seen.
-			if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
-				if dimHash != desc.dimHash {
-					return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
-				}
-			} else {
-				newDimHashesByName[desc.fqName] = desc.dimHash
+			continue
+		}
+
+		// ...then check the new descriptors already seen.
+		if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
 			}
+			continue
 		}
+		newDimHashesByName[desc.fqName] = desc.dimHash
 	}
 
 	// A Collector yielding no Desc at all is considered unchecked.
 	if len(newDescIDs) == 0 {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index 14627044..1ab0e479 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -783,3 +783,45 @@ func MustNewConstSummary(
 	}
 	return m
 }
+
+// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
+func NewConstSummaryWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	ct time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+		createdTs:  timestamppb.New(ct),
+	}, nil
+}
+
+// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
+// NewConstSummaryWithCreatedTimestamp would have returned an error.
+func MustNewConstSummaryWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	ct time.Time,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 955cfd59..2c808eec 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues(
 	return metric
 }
 
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
 // or creates it and returns the new one.
 //
 // This function holds the mutex.
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 25cfaa21..1448439b 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format {
 
 	mediatype, params, err := mime.ParseMediaType(ct)
 	if err != nil {
-		return fmtUnknown
+		return FmtUnknown
 	}
 
 	const textType = "text/plain"
@@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format {
 	switch mediatype {
 	case ProtoType:
 		if p, ok := params["proto"]; ok && p != ProtoProtocol {
-			return fmtUnknown
+			return FmtUnknown
 		}
 		if e, ok := params["encoding"]; ok && e != "delimited" {
-			return fmtUnknown
+			return FmtUnknown
 		}
-		return fmtProtoDelim
+		return FmtProtoDelim
 
 	case textType:
 		if v, ok := params["version"]; ok && v != TextVersion {
-			return fmtUnknown
+			return FmtUnknown
 		}
-		return fmtText
+		return FmtText
 	}
 
-	return fmtUnknown
+	return FmtUnknown
 }
 
 // NewDecoder returns a new decoder based on the given input format.
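As a usage note (not part of the vendored diff), the new promhttp compression negotiation above can be driven entirely through HandlerOpts. A minimal sketch, assuming a plain registry and an illustrative listen address:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry() // hypothetical registry; register collectors here

	// Offer zstd first; the handler negotiates against the client's
	// Accept-Encoding header and falls back to identity when no offer matches.
	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		OfferedCompressions: []promhttp.Compression{
			promhttp.Zstd,
			promhttp.Gzip,
			promhttp.Identity,
		},
	})

	http.Handle("/metrics", handler)
	log.Fatal(http.ListenAndServe(":8080", nil)) // illustrative address
}

Leaving OfferedCompressions empty yields the same default set (identity, gzip, zstd), while DisableCompression: true turns negotiation off entirely.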
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index 7f6cbe7d..d7f3d76f 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -21,9 +21,10 @@ import (
 	"google.golang.org/protobuf/encoding/protodelim"
 	"google.golang.org/protobuf/encoding/prototext"
 
-	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
 	"github.com/prometheus/common/model"
 
+	"github.com/munnerz/goautoneg"
+
 	dto "github.com/prometheus/client_model/go"
 )
 
@@ -67,7 +68,7 @@ func Negotiate(h http.Header) Format {
 		if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
 			switch Format(escapeParam) {
 			case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
-				escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+				escapingScheme = Format("; escaping=" + escapeParam)
 			default:
 				// If the escaping parameter is unknown, ignore it.
 			}
@@ -76,18 +77,18 @@ func Negotiate(h http.Header) Format {
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
 			switch ac.Params["encoding"] {
 			case "delimited":
-				return fmtProtoDelim + escapingScheme
+				return FmtProtoDelim + escapingScheme
 			case "text":
-				return fmtProtoText + escapingScheme
+				return FmtProtoText + escapingScheme
 			case "compact-text":
-				return fmtProtoCompact + escapingScheme
+				return FmtProtoCompact + escapingScheme
 			}
 		}
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
-			return fmtText + escapingScheme
+			return FmtText + escapingScheme
 		}
 	}
-	return fmtText + escapingScheme
+	return FmtText + escapingScheme
 }
 
 // NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -100,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
 		if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
 			switch Format(escapeParam) {
 			case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
-				escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+				escapingScheme = Format("; escaping=" + escapeParam)
 			default:
 				// If the escaping parameter is unknown, ignore it.
 			}
@@ -109,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
 		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
 			switch ac.Params["encoding"] {
 			case "delimited":
-				return fmtProtoDelim + escapingScheme
+				return FmtProtoDelim + escapingScheme
 			case "text":
-				return fmtProtoText + escapingScheme
+				return FmtProtoText + escapingScheme
 			case "compact-text":
-				return fmtProtoCompact + escapingScheme
+				return FmtProtoCompact + escapingScheme
 			}
 		}
 		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
-			return fmtText + escapingScheme
+			return FmtText + escapingScheme
 		}
 		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
 			switch ver {
 			case OpenMetricsVersion_1_0_0:
-				return fmtOpenMetrics_1_0_0 + escapingScheme
+				return FmtOpenMetrics_1_0_0 + escapingScheme
 			default:
-				return fmtOpenMetrics_0_0_1 + escapingScheme
+				return FmtOpenMetrics_0_0_1 + escapingScheme
 			}
 		}
 	}
-	return fmtText + escapingScheme
+	return FmtText + escapingScheme
 }
 
 // NewEncoder returns a new encoder based on content type negotiation. All
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 051b38cd..b2688656 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -15,7 +15,7 @@
 package expfmt
 
 import (
-	"fmt"
+	"errors"
 	"strings"
 
 	"github.com/prometheus/common/model"
@@ -32,24 +32,31 @@ type Format string
 // it on the wire, new content-type strings will have to be agreed upon and
 // added here.
 const (
-	TextVersion   = "0.0.4"
-	ProtoType     = `application/vnd.google.protobuf`
-	ProtoProtocol = `io.prometheus.client.MetricFamily`
-	protoFmt      = ProtoType + "; proto=" + ProtoProtocol + ";"
+	TextVersion   = "0.0.4"
+	ProtoType     = `application/vnd.google.protobuf`
+	ProtoProtocol = `io.prometheus.client.MetricFamily`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+	ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
 
 	OpenMetricsType          = `application/openmetrics-text`
 	OpenMetricsVersion_0_0_1 = "0.0.1"
 	OpenMetricsVersion_1_0_0 = "1.0.0"
 
-	// The Content-Type values for the different wire protocols. Note that these
-	// values are now unexported. If code was relying on comparisons to these
-	// constants, instead use FormatType().
-	fmtUnknown           Format = ``
-	fmtText              Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
-	fmtProtoDelim        Format = protoFmt + ` encoding=delimited`
-	fmtProtoText         Format = protoFmt + ` encoding=text`
-	fmtProtoCompact      Format = protoFmt + ` encoding=compact-text`
-	fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
-	fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
+	// The Content-Type values for the different wire protocols. Do not do direct
+	// comparisons to these constants, instead use the comparison functions.
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
+	FmtUnknown Format = ``
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
+	FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
+	FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
+	FmtProtoText Format = ProtoFmt + ` encoding=text`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+	FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+	FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
 )
 
 const (
@@ -79,17 +86,17 @@ const (
 func NewFormat(t FormatType) Format {
 	switch t {
 	case TypeProtoCompact:
-		return fmtProtoCompact
+		return FmtProtoCompact
 	case TypeProtoDelim:
-		return fmtProtoDelim
+		return FmtProtoDelim
 	case TypeProtoText:
-		return fmtProtoText
+		return FmtProtoText
 	case TypeTextPlain:
-		return fmtText
+		return FmtText
 	case TypeOpenMetrics:
-		return fmtOpenMetrics_1_0_0
+		return FmtOpenMetrics_1_0_0
 	default:
-		return fmtUnknown
+		return FmtUnknown
 	}
 }
 
@@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format {
 // specified version number.
 func NewOpenMetricsFormat(version string) (Format, error) {
 	if version == OpenMetricsVersion_0_0_1 {
-		return fmtOpenMetrics_0_0_1, nil
+		return FmtOpenMetrics_0_0_1, nil
 	}
 	if version == OpenMetricsVersion_1_0_0 {
-		return fmtOpenMetrics_1_0_0, nil
+		return FmtOpenMetrics_1_0_0, nil
 	}
-	return fmtUnknown, fmt.Errorf("unknown open metrics version string")
+	return FmtUnknown, errors.New("unknown open metrics version string")
+}
+
+// WithEscapingScheme returns a copy of Format with the specified escaping
+// scheme appended to the end. If an escaping scheme already exists it is
+// removed.
+func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
+	var terms []string
+	for _, p := range strings.Split(string(f), ";") {
+		toks := strings.Split(p, "=")
+		if len(toks) != 2 {
+			trimmed := strings.TrimSpace(p)
+			if len(trimmed) > 0 {
+				terms = append(terms, trimmed)
+			}
+			continue
+		}
+		key := strings.TrimSpace(toks[0])
+		if key != model.EscapingKey {
+			terms = append(terms, strings.TrimSpace(p))
+		}
+	}
+	terms = append(terms, model.EscapingKey+"="+s.String())
+	return Format(strings.Join(terms, "; "))
 }
 
 // FormatType deduces an overall FormatType for the given format.
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 353c5e93..f1c495dd 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E
 	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
 		compliantName = name[:len(name)-6]
 	}
-	if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
-		compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
+	if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) {
+		compliantName = compliantName + "_" + *in.Unit
 	}
 
 	// Comments, first HELP, then TYPE.
@@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs(
 	if name != "" {
 		// If the name does not pass the legacy validity check, we must put the
 		// metric name inside the braces, quoted.
-		if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+		if !model.IsValidLegacyMetricName(name) {
 			metricInsideBraces = true
 			err := w.WriteByte(separator)
 			written++
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index f9b8265a..4b86434b 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -354,7 +354,7 @@ func writeNameAndLabelPairs(
 	if name != "" {
 		// If the name does not pass the legacy validity check, we must put the
 		// metric name inside the braces.
-		if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+		if !model.IsValidLegacyMetricName(name) {
 			metricInsideBraces = true
 			err := w.WriteByte(separator)
 			written++
@@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
 
 // writeName writes a string as-is if it complies with the legacy naming
 // scheme, or escapes it in double quotes if not.
 func writeName(w enhancedWriter, name string) (int, error) {
-	if model.IsValidLegacyMetricName(model.LabelValue(name)) {
+	if model.IsValidLegacyMetricName(name) {
 		return w.WriteString(name)
 	}
 	var written int
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 26490211..b4607fe4 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -22,9 +22,9 @@ import (
 	"math"
 	"strconv"
 	"strings"
+	"unicode/utf8"
 
 	dto "github.com/prometheus/client_model/go"
-
 	"google.golang.org/protobuf/proto"
 
 	"github.com/prometheus/common/model"
@@ -60,6 +60,7 @@ type TextParser struct {
 	currentMF         *dto.MetricFamily
 	currentMetric     *dto.Metric
 	currentLabelPair  *dto.LabelPair
+	currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.
 
 	// The remaining member variables are only used for summaries/histograms.
 	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
@@ -74,6 +75,9 @@ type TextParser struct {
 	// count and sum of that summary/histogram.
 	currentIsSummaryCount, currentIsSummarySum     bool
 	currentIsHistogramCount, currentIsHistogramSum bool
+	// These indicate if the metric name from the current line being parsed is inside
+	// braces and if that metric name was found respectively.
+	currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
 }
 
 // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) {
 	}
 	p.currentQuantile = math.NaN()
 	p.currentBucket = math.NaN()
+	p.currentMF = nil
 }
 
 // startOfLine represents the state where the next byte read from p.buf is the
 // start of a line (or whitespace leading up to it).
 func (p *TextParser) startOfLine() stateFn {
 	p.lineCount++
+	p.currentMetricIsInsideBraces = false
+	p.currentMetricInsideBracesIsPresent = false
 	if p.skipBlankTab(); p.err != nil {
 		// This is the only place that we expect to see io.EOF,
 		// which is not an error but the signal that we are done.
@@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn {
 		return p.startComment
 	case '\n':
 		return p.startOfLine // Empty line, start the next one.
+	case '{':
+		p.currentMetricIsInsideBraces = true
+		return p.readingLabels
 	}
 	return p.readingMetricName
 }
@@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn {
 		return nil // Unexpected end of input.
 	}
 	if p.currentByte == '}' {
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+		p.currentLabelPairs = nil
 		if p.skipBlankTab(); p.err != nil {
 			return nil // Unexpected end of input.
 		}
@@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn {
 		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
 		return nil
 	}
+	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte != '=' {
+		if p.currentMetricIsInsideBraces {
+			if p.currentMetricInsideBracesIsPresent {
+				p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
+				return nil
+			}
+			switch p.currentByte {
+			case ',':
+				p.setOrCreateCurrentMF()
+				if p.currentMF.Type == nil {
+					p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+				}
+				p.currentMetric = &dto.Metric{}
+				p.currentMetricInsideBracesIsPresent = true
+				return p.startLabelName
+			case '}':
+				p.setOrCreateCurrentMF()
+				if p.currentMF.Type == nil {
+					p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+				}
+				p.currentMetric = &dto.Metric{}
+				p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+				p.currentLabelPairs = nil
+				if p.skipBlankTab(); p.err != nil {
+					return nil // Unexpected end of input.
+				}
+				return p.readingValue
+			default:
+				p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
+				return nil
+			}
+		}
+		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+		p.currentLabelPairs = nil
+		return nil
+	}
 	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
 	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
 		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
@@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn {
 	// labels to 'real' labels.
 	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
 		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
-		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
-	}
-	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
-		return nil // Unexpected end of input.
-	}
-	if p.currentByte != '=' {
-		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
-		return nil
+		p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
 	}
 	// Check for duplicate label names.
 	labels := make(map[string]struct{})
-	for _, l := range p.currentMetric.Label {
+	for _, l := range p.currentLabelPairs {
 		lName := l.GetName()
 		if _, exists := labels[lName]; !exists {
 			labels[lName] = struct{}{}
 		} else {
 			p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
+			p.currentLabelPairs = nil
 			return nil
 		}
 	}
@@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn {
 		if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
 			// Create a more helpful error message.
 			p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+			p.currentLabelPairs = nil
 			return nil
 		}
 	} else {
@@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn {
 			return p.startLabelName
 
 		case '}':
+			if p.currentMF == nil {
+				p.parseError("invalid metric name")
+				return nil
+			}
+			p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+			p.currentLabelPairs = nil
 			if p.skipBlankTab(); p.err != nil {
 				return nil // Unexpected end of input.
 			}
 			return p.readingValue
 		default:
 			p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+			p.currentLabelPairs = nil
 			return nil
 		}
 	}
@@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
 				p.currentToken.WriteByte(p.currentByte)
 			case 'n':
 				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
 			default:
 				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
 				return
@@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
 // but not into p.currentToken.
 func (p *TextParser) readTokenAsMetricName() {
 	p.currentToken.Reset()
+	// A UTF-8 metric name must be quoted and may have escaped characters.
+	quoted := false
+	escaped := false
 	if !isValidMetricNameStart(p.currentByte) {
 		return
 	}
-	for {
-		p.currentToken.WriteByte(p.currentByte)
+	for p.err == nil {
+		if escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '"':
+				quoted = !quoted
+				if !quoted {
+					p.currentByte, p.err = p.buf.ReadByte()
+					return
+				}
+			case '\n':
+				p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
 		p.currentByte, p.err = p.buf.ReadByte()
-		if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+		if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
 			return
 		}
 	}
@@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() {
 // but not into p.currentToken.
 func (p *TextParser) readTokenAsLabelName() {
 	p.currentToken.Reset()
+	// A UTF-8 label name must be quoted and may have escaped characters.
+	quoted := false
+	escaped := false
 	if !isValidLabelNameStart(p.currentByte) {
 		return
 	}
-	for {
-		p.currentToken.WriteByte(p.currentByte)
+	for p.err == nil {
+		if escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '"':
+				quoted = !quoted
+				if !quoted {
+					p.currentByte, p.err = p.buf.ReadByte()
+					return
+				}
+			case '\n':
+				p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
 		p.currentByte, p.err = p.buf.ReadByte()
-		if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+		if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
 			return
 		}
 	}
@@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() {
 			p.currentToken.WriteByte('\n')
 		default:
 			p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+			p.currentLabelPairs = nil
 			return
 		}
 		escaped = false
@@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() {
 }
 
 func isValidLabelNameStart(b byte) bool {
-	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
 }
 
-func isValidLabelNameContinuation(b byte) bool {
-	return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+func isValidLabelNameContinuation(b byte, quoted bool) bool {
+	return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
 }
 
 func isValidMetricNameStart(b byte) bool {
 	return isValidLabelNameStart(b) || b == ':'
 }
 
-func isValidMetricNameContinuation(b byte) bool {
-	return isValidLabelNameContinuation(b) || b == ':'
+func isValidMetricNameContinuation(b byte, quoted bool) bool {
+	return isValidLabelNameContinuation(b, quoted) || b == ':'
 }
 
 func isBlankOrTab(b byte) bool {
@@ -775,7 +895,7 @@ func histogramMetricName(name string) string {
 
 func parseFloat(s string) (float64, error) {
 	if strings.ContainsAny(s, "pP_") {
-		return 0, fmt.Errorf("unsupported character in float")
+		return 0, errors.New("unsupported character in float")
 	}
 	return strconv.ParseFloat(s, 64)
 }
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index 80d1fe94..bd3a39e3 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -14,6 +14,7 @@
 package model
 
 import (
+	"errors"
 	"fmt"
 	"time"
 )
@@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus {
 // Validate checks whether the alert data is inconsistent.
 func (a *Alert) Validate() error {
 	if a.StartsAt.IsZero() {
-		return fmt.Errorf("start time missing")
+		return errors.New("start time missing")
 	}
 	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
-		return fmt.Errorf("start time must be before end time")
+		return errors.New("start time must be before end time")
 	}
 	if err := a.Labels.Validate(); err != nil {
 		return fmt.Errorf("invalid label set: %w", err)
 	}
 	if len(a.Labels) == 0 {
-		return fmt.Errorf("at least one label pair required")
+		return errors.New("at least one label pair required")
 	}
 	if err := a.Annotations.Validate(); err != nil {
 		return fmt.Errorf("invalid annotations: %w", err)
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 3317ce22..73b7aa3e 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
 // therewith.
 type LabelName string
 
-// IsValid returns true iff name matches the pattern of LabelNameRE for legacy
-// names, and iff it's valid UTF-8 if NameValidationScheme is set to
-// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the
-// check but a much faster hardcoded implementation.
+// IsValid returns true iff the name matches the pattern of LabelNameRE when
+// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
+// NameValidationScheme is set to UTF8Validation.
 func (ln LabelName) IsValid() bool {
 	if len(ln) == 0 {
 		return false
 	}
 	switch NameValidationScheme {
 	case LegacyValidation:
-		for i, b := range ln {
-			if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
-				return false
-			}
-		}
+		return ln.IsValidLegacy()
 	case UTF8Validation:
 		return utf8.ValidString(string(ln))
 	default:
 		panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
 	}
+}
+
+// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
+// legacy names. It does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValidLegacy() bool {
+	if len(ln) == 0 {
+		return false
+	}
+	for i, b := range ln {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+			return false
+		}
+	}
 	return true
 }
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
index 481c47b4..abb2c900 100644
--- a/vendor/github.com/prometheus/common/model/labelset_string.go
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -11,8 +11,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build go1.21
-
 package model
 
 import (
diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
deleted file mode 100644
index c4212685..00000000
--- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2024 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !go1.21
-
-package model
-
-import (
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// String was optimized using functions not available for go 1.20
-// or lower. We keep the old implementation for compatibility with client_golang.
-// Once client golang drops support for go 1.20 (scheduled for August 2024), this
-// file can be removed.
-func (l LabelSet) String() string {
-	labelNames := make([]string, 0, len(l))
-	for name := range l {
-		labelNames = append(labelNames, string(name))
-	}
-	sort.Strings(labelNames)
-	lstrs := make([]string, 0, len(l))
-	for _, name := range labelNames {
-		lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
-	}
-	return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index eb865e5a..0daca836 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -14,9 +14,11 @@
 package model
 
 import (
+	"errors"
 	"fmt"
 	"regexp"
 	"sort"
+	"strconv"
 	"strings"
 	"unicode/utf8"
 
@@ -34,10 +36,13 @@ var (
 	// goroutines are started.
 	NameValidationScheme = LegacyValidation
 
-	// NameEscapingScheme defines the default way that names will be
-	// escaped when presented to systems that do not support UTF-8 names. If the
-	// Content-Type "escaping" term is specified, that will override this value.
-	NameEscapingScheme = ValueEncodingEscaping
+	// NameEscapingScheme defines the default way that names will be escaped when
+	// presented to systems that do not support UTF-8 names. If the Content-Type
+	// "escaping" term is specified, that will override this value.
+	// NameEscapingScheme should not be set to the NoEscaping value. That string
+	// is used in content negotiation to indicate that a system supports UTF-8 and
+	// has that feature enabled.
+	NameEscapingScheme = UnderscoreEscaping
 )
 
 // ValidationScheme is a Go enum for determining how metric and label names will
@@ -161,7 +166,7 @@ func (m Metric) FastFingerprint() Fingerprint {
 func IsValidMetricName(n LabelValue) bool {
 	switch NameValidationScheme {
 	case LegacyValidation:
-		return IsValidLegacyMetricName(n)
+		return IsValidLegacyMetricName(string(n))
 	case UTF8Validation:
 		if len(n) == 0 {
 			return false
@@ -176,7 +181,7 @@ func IsValidMetricName(n LabelValue) bool {
 // legacy validation scheme regardless of the value of NameValidationScheme.
 // This function, however, does not use MetricNameRE for the check but a much
 // faster hardcoded implementation.
-func IsValidLegacyMetricName(n LabelValue) bool {
+func IsValidLegacyMetricName(n string) bool {
 	if len(n) == 0 {
 		return false
 	}
@@ -208,7 +213,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 	}
 
 	// If the name is nil, copy as-is, don't try to escape.
-	if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
+	if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
 		out.Name = v.Name
 	} else {
 		out.Name = proto.String(EscapeName(v.GetName(), scheme))
@@ -230,7 +235,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 
 		for _, l := range m.Label {
 			if l.GetName() == MetricNameLabel {
-				if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+				if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
 					escaped.Label = append(escaped.Label, l)
 					continue
 				}
@@ -240,7 +245,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 				})
 				continue
 			}
-			if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
+			if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
 				escaped.Label = append(escaped.Label, l)
 				continue
 			}
@@ -256,20 +261,16 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF
 
 func metricNeedsEscaping(m *dto.Metric) bool {
 	for _, l := range m.Label {
-		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
 			return true
 		}
-		if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
+		if !IsValidLegacyMetricName(l.GetName()) {
 			return true
 		}
 	}
 	return false
 }
 
-const (
-	lowerhex = "0123456789abcdef"
-)
-
 // EscapeName escapes the incoming name according to the provided escaping
 // scheme. Depending on the rules of escaping, this may cause no change in the
 // string that is returned. (Especially NoEscaping, which by definition is a
@@ -283,7 +284,7 @@ func EscapeName(name string, scheme EscapingScheme) string {
 	case NoEscaping:
 		return name
 	case UnderscoreEscaping:
-		if IsValidLegacyMetricName(LabelValue(name)) {
+		if IsValidLegacyMetricName(name) {
 			return name
 		}
 		for i, b := range name {
@@ -304,31 +305,25 @@ func EscapeName(name string, scheme EscapingScheme) string {
 			} else if isValidLegacyRune(b, i) {
 				escaped.WriteRune(b)
 			} else {
-				escaped.WriteRune('_')
+				escaped.WriteString("__")
 			}
 		}
 		return escaped.String()
 	case ValueEncodingEscaping:
-		if IsValidLegacyMetricName(LabelValue(name)) {
+		if IsValidLegacyMetricName(name) {
 			return name
 		}
 		escaped.WriteString("U__")
 		for i, b := range name {
-			if isValidLegacyRune(b, i) {
+			if b == '_' {
+				escaped.WriteString("__")
+			} else if isValidLegacyRune(b, i) {
 				escaped.WriteRune(b)
 			} else if !utf8.ValidRune(b) {
 				escaped.WriteString("_FFFD_")
-			} else if b < 0x100 {
-				escaped.WriteRune('_')
-				for s := 4; s >= 0; s -= 4 {
-					escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
-				}
-				escaped.WriteRune('_')
-			} else if b < 0x10000 {
+			} else {
 				escaped.WriteRune('_')
-				for s := 12; s >= 0; s -= 4 {
-					escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
-				}
+				escaped.WriteString(strconv.FormatInt(int64(b), 16))
 				escaped.WriteRune('_')
 			}
 		}
@@ -386,8 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string {
 			// We think we are in a UTF-8 code, process it.
 			var utf8Val uint
 			for j := 0; i < len(escapedName); j++ {
-				// This is too many characters for a utf8 value.
-				if j > 4 {
+				// This is too many characters for a utf8 value based on the MaxRune
+				// value of '\U0010FFFF'.
+				if j >= 6 {
 					return name
 				}
 				// Found a closing underscore, convert to a rune, check validity, and append.
@@ -440,7 +436,7 @@ func (e EscapingScheme) String() string {
 
 func ToEscapingScheme(s string) (EscapingScheme, error) {
 	if s == "" {
-		return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
+		return NoEscaping, errors.New("got empty string instead of escaping scheme")
 	}
 	switch s {
 	case AllowUTF8:
@@ -452,6 +448,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) {
 	case EscapeValues:
 		return ValueEncodingEscaping, nil
 	default:
-		return NoEscaping, fmt.Errorf("unknown format scheme " + s)
+		return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
 	}
 }
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index 910b0b71..8f91a970 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -15,6 +15,7 @@ package model
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"regexp"
 	"time"
@@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error {
 	}
 
 	if len(m.Name) == 0 {
-		return fmt.Errorf("label name in matcher must not be empty")
+		return errors.New("label name in matcher must not be empty")
 	}
 	if m.IsRegex {
 		if _, err := regexp.Compile(m.Value); err != nil {
@@ -77,7 +78,7 @@ type Silence struct {
 // Validate returns true iff all fields of the silence have valid values.
 func (s *Silence) Validate() error {
 	if len(s.Matchers) == 0 {
-		return fmt.Errorf("at least one matcher required")
+		return errors.New("at least one matcher required")
 	}
 	for _, m := range s.Matchers {
 		if err := m.Validate(); err != nil {
@@ -85,22 +86,22 @@ func (s *Silence) Validate() error {
 		}
 	}
 	if s.StartsAt.IsZero() {
-		return fmt.Errorf("start time missing")
+		return errors.New("start time missing")
 	}
 	if s.EndsAt.IsZero() {
-		return fmt.Errorf("end time missing")
+		return errors.New("end time missing")
 	}
 	if s.EndsAt.Before(s.StartsAt) {
-		return fmt.Errorf("start time must be before end time")
+		return errors.New("start time must be before end time")
 	}
 	if s.CreatedBy == "" {
-		return fmt.Errorf("creator information missing")
+		return errors.New("creator information missing")
 	}
 	if s.Comment == "" {
-		return fmt.Errorf("comment missing")
+		return errors.New("comment missing")
 	}
 	if s.CreatedAt.IsZero() {
-		return fmt.Errorf("creation timestamp missing")
+		return errors.New("creation timestamp missing")
 	}
 	return nil
 }
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
index ae35cc2a..6bfc757d 100644
--- a/vendor/github.com/prometheus/common/model/value_float.go
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -15,6 +15,7 @@ package model
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"math"
 	"strconv"
@@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) {
 // UnmarshalJSON implements json.Unmarshaler.
 func (v *SampleValue) UnmarshalJSON(b []byte) error {
 	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
-		return fmt.Errorf("sample value must be a quoted string")
+		return errors.New("sample value must be a quoted string")
 	}
 	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
 	if err != nil {
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
index 54bb038c..895e6a3e 100644
--- a/vendor/github.com/prometheus/common/model/value_histogram.go
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -15,6 +15,7 @@ package model
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) {
 
 func (v *FloatString) UnmarshalJSON(b []byte) error {
 	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
-		return fmt.Errorf("float value must be a quoted string")
+		return errors.New("float value must be a quoted string")
 	}
 	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
 	if err != nil {
@@ -141,7 +142,7 @@ type SampleHistogramPair struct {
 
 func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
 	if s.Histogram == nil {
-		return nil, fmt.Errorf("histogram is nil")
+		return nil, errors.New("histogram is nil")
 	}
 	t, err := json.Marshal(s.Timestamp)
 	if err != nil {
@@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
 		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
 	}
 	if s.Histogram == nil {
-		return fmt.Errorf("histogram is null")
+		return errors.New("histogram is null")
 	}
 	return nil
 }
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
index c24864a9..126df9e6 100644
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,9 +1,16 @@
 ---
 linters:
   enable:
+  - errcheck
   - godot
+  - gosimple
+  - govet
+  - ineffassign
   - misspell
   - revive
+  - staticcheck
+  - testifylint
+  - unused
 
 linter-settings:
   godot:
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
index 56ba67d3..e00f3b36 100644
--- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -1,2 +1,3 @@
 * Johannes 'fish' Ziemke @discordianfish
-* Paul Gier @pgier
+* Paul Gier @pgier
+* Ben Kochie @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 062a2818..16172923 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -49,23 +49,23 @@ endif
 GOTEST := $(GO) test
 GOTEST_DIR :=
 ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell command -v gotestsum > /dev/null),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
 	GOTEST_DIR := test-results
 	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
 endif
 endif
 
-PROMU_VERSION ?= 0.15.0
+PROMU_VERSION ?= 0.17.0
 PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
 
 SKIP_GOLANGCI_LINT :=
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.54.2
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
+GOLANGCI_LINT_VERSION ?= v1.59.0
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
-	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
+	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
 		# If we're in CI and there is an Actions file, that means the linter
 		# is being run in Actions, so we don't need to run it here.
 		ifneq (,$(SKIP_GOLANGCI_LINT))
@@ -169,16 +169,20 @@ common-vet:
 common-lint: $(GOLANGCI_LINT)
 ifdef GOLANGCI_LINT
 	@echo ">> running golangci-lint"
-# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
-# Otherwise staticcheck might fail randomly for some reason not yet explained.
-	$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
 	$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
 endif
 
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+	@echo ">> running golangci-lint fix"
+	$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
 .PHONY: common-yamllint
 common-yamllint:
 	@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell command -v yamllint > /dev/null))
+ifeq (, $(shell command -v yamllint 2> /dev/null))
 	@echo "yamllint not installed so skipping"
 else
 	yamllint .
@@ -204,6 +208,10 @@ common-tarball: promu
 	@echo ">> building release tarball"
 	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
 
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+	@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
 .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
 common-docker: $(BUILD_DOCKER_ARCHS)
 $(BUILD_DOCKER_ARCHS): common-docker-%:
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index 28783e2d..cdcc8a7c 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -55,7 +55,7 @@ type ARPEntry struct {
 func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
 	data, err := os.ReadFile(fs.proc.Path("net/arp"))
 	if err != nil {
-		return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
+		return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
 	}
 
 	return parseARPEntries(data)
@@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
 		} else if width == expectedDataWidth {
 			entry, err := parseARPEntry(columns)
 			if err != nil {
-				return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
+				return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
 			}
 			entries = append(entries, entry)
 		} else {
-			return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
+			return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
 		}
 	}
 
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
index 4a173636..83807500 100644
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -58,8 +58,8 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
 			return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
 		}
 
-		node := strings.TrimRight(parts[1], ",")
-		zone := strings.TrimRight(parts[3], ",")
+		node := strings.TrimSuffix(parts[1], ",")
+		zone := strings.TrimSuffix(parts[3], ",")
arraySize := len(parts[4:]) if bucketCount == -1 { @@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) + return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go index f4f5501c..f0950bb4 100644 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -194,7 +194,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) match, err := regexp.MatchString("^[Pp]rocessor", firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) } field := strings.SplitN(firstLine, ": ", 2) @@ -386,7 +386,7 @@ func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go index 9a73e263..5f2a37a7 100644 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ b/vendor/github.com/prometheus/procfs/crypto.go @@ -55,13 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err) + return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err) } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err) } return crypto, nil @@ -84,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) { kv := strings.Split(text, ":") if len(kv) != 2 { - return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text) } k := strings.TrimSpace(kv[0]) diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go index f560a8db..cf2e3eaa 100644 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ b/vendor/github.com/prometheus/procfs/fscache.go @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err) + return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) + return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, 
len(setFields), len(fields), err) } for i := range setFields { diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go index 5a145bbf..bc3a20c9 100644 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -221,16 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) { case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) + return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err) + return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { return nil, 0, - fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err) + fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go index 59465c5b..332e76c1 100644 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ b/vendor/github.com/prometheus/procfs/loadavg.go @@ -51,7 +51,7 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) + return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go index fdd4b954..67a9d2b4 100644 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -23,7 +23,7 @@ import ( var ( statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) - recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`) recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) @@ -50,6 +50,8 @@ type MDStat struct { BlocksTotal int64 // Number of blocks on the device that are in sync. BlocksSynced int64 + // Number of blocks on the device that need to be synced. 
+ BlocksToBeSynced int64 // progress percentage of current sync BlocksSyncedPct float64 // estimated finishing time for current sync (in minutes) @@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) { } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,7 +92,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line) + return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive @@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) + return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size + blocksSynced := size + blocksToBeSynced := size speed := float64(0) finish := float64(0) pct := float64(0) @@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Handle case when resync=PENDING or resync=DELAYED. if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { - syncedBlocks = 0 + blocksSynced = 0 } else { - syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) + blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) + return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { DisksSpare: spare, DisksTotal: total, BlocksTotal: size, - BlocksSynced: syncedBlocks, + BlocksSynced: blocksSynced, + BlocksToBeSynced: blocksToBeSynced, BlocksSyncedPct: pct, BlocksSyncedFinishTime: finish, BlocksSyncedSpeed: speed, @@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { statusFields := strings.Fields(statusLine) if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in matches := statusLineRE.FindStringSubmatch(statusLine) if 
len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) return active, total, down, size, nil } -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { +func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err) } - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + blocks := strings.Split(matches[1], "/") + blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err) + } + + blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64) + if err != nil { + return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. 
finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } - return syncedBlocks, pct, finish, speed, nil + return blocksSynced, blocksToBeSynced, pct, finish, speed, nil } func evalComponentDevices(deviceFields []string) []string { diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go index eaf00e22..4b2c4050 100644 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ b/vendor/github.com/prometheus/procfs/meminfo.go @@ -126,6 +126,7 @@ type Meminfo struct { VmallocUsed *uint64 // largest contiguous block of vmalloc area which is free VmallocChunk *uint64 + Percpu *uint64 HardwareCorrupted *uint64 AnonHugePages *uint64 ShmemHugePages *uint64 @@ -140,6 +141,55 @@ type Meminfo struct { DirectMap4k *uint64 DirectMap2M *uint64 DirectMap1G *uint64 + + // The struct fields below are the byte-normalized counterparts to the + // existing struct fields. Values are normalized using the optional + // unit field in the meminfo line. + MemTotalBytes *uint64 + MemFreeBytes *uint64 + MemAvailableBytes *uint64 + BuffersBytes *uint64 + CachedBytes *uint64 + SwapCachedBytes *uint64 + ActiveBytes *uint64 + InactiveBytes *uint64 + ActiveAnonBytes *uint64 + InactiveAnonBytes *uint64 + ActiveFileBytes *uint64 + InactiveFileBytes *uint64 + UnevictableBytes *uint64 + MlockedBytes *uint64 + SwapTotalBytes *uint64 + SwapFreeBytes *uint64 + DirtyBytes *uint64 + WritebackBytes *uint64 + AnonPagesBytes *uint64 + MappedBytes *uint64 + ShmemBytes *uint64 + SlabBytes *uint64 + SReclaimableBytes *uint64 + SUnreclaimBytes *uint64 + KernelStackBytes *uint64 + PageTablesBytes *uint64 + NFSUnstableBytes *uint64 + BounceBytes *uint64 + WritebackTmpBytes *uint64 + CommitLimitBytes *uint64 + CommittedASBytes *uint64 + VmallocTotalBytes *uint64 + VmallocUsedBytes *uint64 + VmallocChunkBytes *uint64 + PercpuBytes *uint64 + HardwareCorruptedBytes *uint64 + AnonHugePagesBytes *uint64 + ShmemHugePagesBytes *uint64 + ShmemPmdMappedBytes *uint64 + CmaTotalBytes *uint64 + CmaFreeBytes *uint64 + HugepagesizeBytes *uint64 + DirectMap4kBytes *uint64 + DirectMap2MBytes *uint64 + DirectMap1GBytes *uint64 } // Meminfo returns an information about current kernel/system memory statistics. 
@@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) { m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err) + return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err) } return *m, nil @@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { var m Meminfo s := bufio.NewScanner(r) for s.Scan() { - // Each line has at least a name and value; we ignore the unit. fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) - } + var val, valBytes uint64 - v, err := strconv.ParseUint(fields[1], 0, 64) + val, err := strconv.ParseUint(fields[1], 0, 64) if err != nil { return nil, err } + switch len(fields) { + case 2: + // No unit present, use the parsed the value as bytes directly. + valBytes = val + case 3: + // Unit present in optional 3rd field, convert it to + // bytes. The only unit supported within the Linux + // kernel is `kB`. + if fields[2] != "kB" { + return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2]) + } + + valBytes = 1024 * val + + default: + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) + } + switch fields[0] { case "MemTotal:": - m.MemTotal = &v + m.MemTotal = &val + m.MemTotalBytes = &valBytes case "MemFree:": - m.MemFree = &v + m.MemFree = &val + m.MemFreeBytes = &valBytes case "MemAvailable:": - m.MemAvailable = &v + m.MemAvailable = &val + m.MemAvailableBytes = &valBytes case "Buffers:": - m.Buffers = &v + m.Buffers = &val + m.BuffersBytes = &valBytes case "Cached:": - m.Cached = &v + m.Cached = &val + m.CachedBytes = &valBytes case "SwapCached:": - m.SwapCached = &v + m.SwapCached = &val + m.SwapCachedBytes = &valBytes case "Active:": - m.Active = &v + m.Active = &val + m.ActiveBytes = &valBytes case "Inactive:": - m.Inactive = &v + m.Inactive = &val + m.InactiveBytes = &valBytes case "Active(anon):": - m.ActiveAnon = &v + m.ActiveAnon = &val + m.ActiveAnonBytes = &valBytes case "Inactive(anon):": - m.InactiveAnon = &v + m.InactiveAnon = &val + m.InactiveAnonBytes = &valBytes case "Active(file):": - m.ActiveFile = &v + m.ActiveFile = &val + m.ActiveFileBytes = &valBytes case "Inactive(file):": - m.InactiveFile = &v + m.InactiveFile = &val + m.InactiveFileBytes = &valBytes case "Unevictable:": - m.Unevictable = &v + m.Unevictable = &val + m.UnevictableBytes = &valBytes case "Mlocked:": - m.Mlocked = &v + m.Mlocked = &val + m.MlockedBytes = &valBytes case "SwapTotal:": - m.SwapTotal = &v + m.SwapTotal = &val + m.SwapTotalBytes = &valBytes case "SwapFree:": - m.SwapFree = &v + m.SwapFree = &val + m.SwapFreeBytes = &valBytes case "Dirty:": - m.Dirty = &v + m.Dirty = &val + m.DirtyBytes = &valBytes case "Writeback:": - m.Writeback = &v + m.Writeback = &val + m.WritebackBytes = &valBytes case "AnonPages:": - m.AnonPages = &v + m.AnonPages = &val + m.AnonPagesBytes = &valBytes case "Mapped:": - m.Mapped = &v + m.Mapped = &val + m.MappedBytes = &valBytes case "Shmem:": - m.Shmem = &v + m.Shmem = &val + m.ShmemBytes = &valBytes case "Slab:": - m.Slab = &v + m.Slab = &val + m.SlabBytes = &valBytes case "SReclaimable:": - m.SReclaimable = &v + m.SReclaimable = &val + m.SReclaimableBytes = &valBytes case "SUnreclaim:": - m.SUnreclaim = &v + m.SUnreclaim = &val + m.SUnreclaimBytes = &valBytes case "KernelStack:": - m.KernelStack = &v + m.KernelStack = &val + m.KernelStackBytes = &valBytes case "PageTables:": - m.PageTables = &v + m.PageTables = &val + 
m.PageTablesBytes = &valBytes case "NFS_Unstable:": - m.NFSUnstable = &v + m.NFSUnstable = &val + m.NFSUnstableBytes = &valBytes case "Bounce:": - m.Bounce = &v + m.Bounce = &val + m.BounceBytes = &valBytes case "WritebackTmp:": - m.WritebackTmp = &v + m.WritebackTmp = &val + m.WritebackTmpBytes = &valBytes case "CommitLimit:": - m.CommitLimit = &v + m.CommitLimit = &val + m.CommitLimitBytes = &valBytes case "Committed_AS:": - m.CommittedAS = &v + m.CommittedAS = &val + m.CommittedASBytes = &valBytes case "VmallocTotal:": - m.VmallocTotal = &v + m.VmallocTotal = &val + m.VmallocTotalBytes = &valBytes case "VmallocUsed:": - m.VmallocUsed = &v + m.VmallocUsed = &val + m.VmallocUsedBytes = &valBytes case "VmallocChunk:": - m.VmallocChunk = &v + m.VmallocChunk = &val + m.VmallocChunkBytes = &valBytes + case "Percpu:": + m.Percpu = &val + m.PercpuBytes = &valBytes case "HardwareCorrupted:": - m.HardwareCorrupted = &v + m.HardwareCorrupted = &val + m.HardwareCorruptedBytes = &valBytes case "AnonHugePages:": - m.AnonHugePages = &v + m.AnonHugePages = &val + m.AnonHugePagesBytes = &valBytes case "ShmemHugePages:": - m.ShmemHugePages = &v + m.ShmemHugePages = &val + m.ShmemHugePagesBytes = &valBytes case "ShmemPmdMapped:": - m.ShmemPmdMapped = &v + m.ShmemPmdMapped = &val + m.ShmemPmdMappedBytes = &valBytes case "CmaTotal:": - m.CmaTotal = &v + m.CmaTotal = &val + m.CmaTotalBytes = &valBytes case "CmaFree:": - m.CmaFree = &v + m.CmaFree = &val + m.CmaFreeBytes = &valBytes case "HugePages_Total:": - m.HugePagesTotal = &v + m.HugePagesTotal = &val case "HugePages_Free:": - m.HugePagesFree = &v + m.HugePagesFree = &val case "HugePages_Rsvd:": - m.HugePagesRsvd = &v + m.HugePagesRsvd = &val case "HugePages_Surp:": - m.HugePagesSurp = &v + m.HugePagesSurp = &val case "Hugepagesize:": - m.Hugepagesize = &v + m.Hugepagesize = &val + m.HugepagesizeBytes = &valBytes case "DirectMap4k:": - m.DirectMap4k = &v + m.DirectMap4k = &val + m.DirectMap4kBytes = &valBytes case "DirectMap2M:": - m.DirectMap2M = &v + m.DirectMap2M = &val + m.DirectMap2MBytes = &valBytes case "DirectMap1G:": - m.DirectMap1G = &v + m.DirectMap1G = &val + m.DirectMap1GBytes = &valBytes } } diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go index 388ebf39..a704c5e7 100644 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/vendor/github.com/prometheus/procfs/mountinfo.go @@ -109,7 +109,7 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { if mountInfo[6] != "" { mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) if err != nil { - return nil, fmt.Errorf("%s: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: %w", ErrFileParse, err) } } return mount, nil diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 9d8af6db..75a3b6c8 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -88,7 +88,7 @@ type MountStatsNFS struct { // Statistics broken down by filesystem operation. Operations []NFSOperationStats // Statistics about the NFS RPC transport. - Transport NFSTransportStats + Transport []NFSTransportStats } // mountStats implements MountStats. @@ -194,8 +194,6 @@ type NFSOperationStats struct { CumulativeTotalResponseMilliseconds uint64 // Duration from when a request was enqueued to when it was completely handled. 
CumulativeTotalRequestMilliseconds uint64 - // The average time from the point the client sends RPC requests until it receives the response. - AverageRTTMilliseconds float64 // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. Errors uint64 } @@ -434,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, err } - stats.Transport = *tstats + stats.Transport = append(stats.Transport, *tstats) } // When encountering "per-operation statistics", we must break this @@ -582,9 +580,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalRequestMilliseconds: ns[7], } - if ns[0] != 0 { - opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0]) - } if len(ns) > 8 { opStats.Errors = ns[8] @@ -632,7 +627,7 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) + return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go index fdfa4561..316df5fb 100644 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err) } return stat, nil @@ -86,7 +86,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err) + return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err) } numEntries := len(entries) if numEntries < 16 || numEntries > 17 { diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go index 4da81ea5..b70f1fc7 100644 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -50,10 +50,13 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 + // Drops shows the total number of dropped packets of all UPD sockets. + Drops *uint64 } // netIPSocketLine represents the fields parsed from a single line // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. 
netIPSocketLine struct { Sl uint64 @@ -66,6 +69,7 @@ type ( RxQueue uint64 UID uint64 Inode uint64 + Drops *uint64 } ) @@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) { defer f.Close() var netIPSocket NetIPSocket + isUDP := strings.Contains(file, "udp") lr := io.LimitReader(f, readLimit) s := bufio.NewScanner(lr) s.Scan() // skip first line with headers for s.Scan() { fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) + line, err := parseNetIPSocketLine(fields, isUDP) if err != nil { return nil, err } @@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { defer f.Close() var netIPSocketSummary NetIPSocketSummary + var udpPacketDrops uint64 + isUDP := strings.Contains(file, "udp") lr := io.LimitReader(f, readLimit) s := bufio.NewScanner(lr) s.Scan() // skip first line with headers for s.Scan() { fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) + line, err := parseNetIPSocketLine(fields, isUDP) if err != nil { return nil, err } netIPSocketSummary.TxQueueLength += line.TxQueue netIPSocketSummary.RxQueueLength += line.RxQueue netIPSocketSummary.UsedSockets++ + if isUDP { + udpPacketDrops += *line.Drops + netIPSocketSummary.Drops = &udpPacketDrops + } } if err := s.Err(); err != nil { return nil, err @@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) { var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) + return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -144,12 +155,12 @@ func parseIP(hexIP string) (net.IP, error) { } return i, nil default: - return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil) + return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil) } } // parseNetIPSocketLine parses a single line, represented by a list of fields. 
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { +func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) { line := &netIPSocketLine{} if len(fields) < 10 { return nil, fmt.Errorf( @@ -167,7 +178,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) + return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") @@ -178,7 +189,7 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) + return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address @@ -190,12 +201,12 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) + return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) + return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue @@ -208,20 +219,29 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) + return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) + return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) + return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + } + + // drops + if isUDP { + drops, err := strconv.ParseUint(fields[12], 0, 64) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err) + } + line.Drops = &drops } return line, nil diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go index 360e36af..fae62b13 100644 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -69,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := 
parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err) + return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -89,7 +89,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) + return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go index c7708529..71c8059f 100644 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/vendor/github.com/prometheus/procfs/net_softnet.go @@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go new file mode 100644 index 00000000..13994c17 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go @@ -0,0 +1,119 @@ +// Copyright 2023 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// TLSStat struct represents data in /proc/net/tls_stat. +// See https://docs.kernel.org/networking/tls.html#statistics +type TLSStat struct { + // number of TX sessions currently installed where host handles cryptography + TLSCurrTxSw int + // number of RX sessions currently installed where host handles cryptography + TLSCurrRxSw int + // number of TX sessions currently installed where NIC handles cryptography + TLSCurrTxDevice int + // number of RX sessions currently installed where NIC handles cryptography + TLSCurrRxDevice int + //number of TX sessions opened with host cryptography + TLSTxSw int + //number of RX sessions opened with host cryptography + TLSRxSw int + // number of TX sessions opened with NIC cryptography + TLSTxDevice int + // number of RX sessions opened with NIC cryptography + TLSRxDevice int + // record decryption failed (e.g. due to incorrect authentication tag) + TLSDecryptError int + // number of RX resyncs sent to NICs handling cryptography + TLSRxDeviceResync int + // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records. + TLSDecryptRetry int + // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. + TLSRxNoPadViolation int +} + +// NewTLSStat reads the tls_stat statistics. 
+func NewTLSStat() (TLSStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return TLSStat{}, err + } + + return fs.NewTLSStat() +} + +// NewTLSStat reads the tls_stat statistics. +func (fs FS) NewTLSStat() (TLSStat, error) { + file, err := os.Open(fs.proc.Path("net/tls_stat")) + if err != nil { + return TLSStat{}, err + } + defer file.Close() + + var ( + tlsstat = TLSStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return TLSStat{}, err + } + + switch name { + case "TlsCurrTxSw": + tlsstat.TLSCurrTxSw = value + case "TlsCurrRxSw": + tlsstat.TLSCurrRxSw = value + case "TlsCurrTxDevice": + tlsstat.TLSCurrTxDevice = value + case "TlsCurrRxDevice": + tlsstat.TLSCurrRxDevice = value + case "TlsTxSw": + tlsstat.TLSTxSw = value + case "TlsRxSw": + tlsstat.TLSRxSw = value + case "TlsTxDevice": + tlsstat.TLSTxDevice = value + case "TlsRxDevice": + tlsstat.TLSRxDevice = value + case "TlsDecryptError": + tlsstat.TLSDecryptError = value + case "TlsRxDeviceResync": + tlsstat.TLSRxDeviceResync = value + case "TlsDecryptRetry": + tlsstat.TLSDecryptRetry = value + case "TlsRxNoPadViolation": + tlsstat.TLSRxNoPadViolation = value + } + + } + + return tlsstat, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go index acbbc57e..d868cebd 100644 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ b/vendor/github.com/prometheus/procfs/net_unix.go @@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err) + return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) + return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err) + return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err) + return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err) + return nil, fmt.Errorf("%w failed to parse inode %q: 
%w", ErrFileParse, fields[6], err) } } diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go index 7443edca..7c597bc8 100644 --- a/vendor/github.com/prometheus/procfs/net_wireless.go +++ b/vendor/github.com/prometheus/procfs/net_wireless.go @@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) { m, err := parseWireless(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err) + return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err) } return m, nil @@ -114,47 +114,47 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) if err != nil { - return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) + return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) } qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) if err != nil { - return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) + return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) } qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) if err != nil { - return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) + return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) } dnwid, err := strconv.Atoi(stats[4]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) + return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) } dcrypt, err := strconv.Atoi(stats[5]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) + return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) } dfrag, err := strconv.Atoi(stats[6]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) + return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) } dretry, err := strconv.Atoi(stats[7]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) + return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) } dmisc, err := strconv.Atoi(stats[8]) if err != nil { - return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) + return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) } mbeacon, err := strconv.Atoi(stats[9]) if err != nil { - return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) + return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) } w := &Wireless{ @@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) { } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) + return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) } return interfaces, nil diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go index d1f71caa..14279636 100644 --- a/vendor/github.com/prometheus/procfs/proc.go +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -111,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != 
nil { - return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) + return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -137,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) { return []string{}, nil } - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil + return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil } // Wchan returns the wchan (wait channel) of a process. @@ -212,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err) + return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -297,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go index c86d815d..9530b14b 100644 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) { } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err) + return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go index c2266675..0f8f847f 100644 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err) + return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -58,7 +58,7 @@ func (p Proc) Namespaces() (Namespaces, error) { typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err) + return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go index fe9dbb42..ccd35f15 100644 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ b/vendor/github.com/prometheus/procfs/proc_psi.go @@ -61,7 +61,7 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) + return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } return parsePSIStats(bytes.NewReader(data)) diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go index ad8785a4..09060e82 100644 --- 
a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } v := strings.TrimSpace(kv[1]) - v = strings.TrimRight(v, " kB") + v = strings.TrimSuffix(v, " kB") vKBytes, err := strconv.ParseUint(v, 10, 64) if err != nil { diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go index 923e5500..06a8d931 100644 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -110,6 +110,11 @@ type ProcStat struct { Policy uint // Aggregated block I/O delays, measured in clock ticks (centiseconds). DelayAcctBlkIOTicks uint64 + // Guest time of the process (time spent running a virtual CPU for a guest + // operating system), measured in clock ticks. + GuestTime int + // Guest time of the process's children, measured in clock ticks. + CGuestTime int proc FS } @@ -189,6 +194,8 @@ func (p Proc) Stat() (ProcStat, error) { &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, + &s.GuestTime, + &s.CGuestTime, ) if err != nil { return ProcStat{}, err diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index 46307f57..a055197c 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,6 +15,7 @@ package procfs import ( "bytes" + "math/bits" "sort" "strconv" "strings" @@ -76,9 +77,9 @@ type ProcStatus struct { NonVoluntaryCtxtSwitches uint64 // UIDs of the process (Real, effective, saved set, and filesystem UIDs) - UIDs [4]string + UIDs [4]uint64 // GIDs of the process (Real, effective, saved set, and filesystem GIDs) - GIDs [4]string + GIDs [4]uint64 // CpusAllowedList: List of cpu cores processes are allowed to run on. CpusAllowedList []uint64 @@ -113,22 +114,37 @@ func (p Proc) NewStatus() (ProcStatus, error) { // convert kB to B vBytes := vKBytes * 1024 - s.fillStatus(k, v, vKBytes, vBytes) + err = s.fillStatus(k, v, vKBytes, vBytes) + if err != nil { + return ProcStatus{}, err + } } return s, nil } -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error { switch k { case "Tgid": s.TGID = int(vUint) case "Name": s.Name = vString case "Uid": - copy(s.UIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "Gid": - copy(s.GIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "NSpid": s.NSpids = calcNSPidsList(vString) case "VmPeak": @@ -173,6 +189,7 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.CpusAllowedList = calcCpusAllowedList(vString) } + return nil } // TotalCtxtSwitches returns the total context switch. 
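The `proc_status.go` change above turns `UIDs` and `GIDs` from `[4]string` into `[4]uint64`, so malformed status lines now surface as parse errors instead of propagating as strings. A minimal sketch of the new `fillStatus` behavior for a `Uid:`/`Gid:` line (helper name is illustrative):

```go
package main

import (
	"fmt"
	"math/bits"
	"strconv"
	"strings"
)

// parseIDLine mirrors the new fillStatus logic: the four tab-separated IDs
// (real, effective, saved set, filesystem) are parsed as unsigned integers
// sized to the platform word, and the first bad field aborts with an error.
func parseIDLine(v string) ([4]uint64, error) {
	var ids [4]uint64
	for i, f := range strings.Split(v, "\t") {
		id, err := strconv.ParseUint(f, 10, bits.UintSize)
		if err != nil {
			return ids, err
		}
		ids[i] = id
	}
	return ids, nil
}

func main() {
	ids, err := parseIDLine("1000\t1000\t1000\t1000")
	fmt.Println(ids, err) // [1000 1000 1000 1000] <nil>
}
```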
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go index 12c5bf05..5eefbe2e 100644 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ b/vendor/github.com/prometheus/procfs/proc_sys.go @@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) { vp := util.NewValueParser(f) values[i] = vp.Int() if err := vp.Err(); err != nil { - return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) + return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err) } } return values, nil diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go index b8fad677..28708e07 100644 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ b/vendor/github.com/prometheus/procfs/softirqs.go @@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Hi = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "TIMER:": @@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Timer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_TX:": @@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetTx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "NET_RX:": @@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.NetRx = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "BLOCK:": @@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Block = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "IRQ_POLL:": @@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.IRQPoll = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): 
%w", ErrFileParse, count, i, err) } } case parts[0] == "TASKLET:": @@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Tasklet = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "SCHED:": @@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.Sched = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "HRTIMER:": @@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.HRTimer = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "RCU:": @@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) { softirqs.RCU = make([]uint64, len(perCPU)) for i, count := range perCPU { if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) } } } } if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("%s: couldn't parse softirqs: %w", ErrFileParse, err) + return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err) } return softirqs, scanner.Err() diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go index 34fc3ee2..e36b41c1 100644 --- a/vendor/github.com/prometheus/procfs/stat.go +++ b/vendor/github.com/prometheus/procfs/stat.go @@ -93,7 +93,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu): %w", ErrFileParse, line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("%s: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("%s: couldn't parse %q (softirq): %w", ErrFileParse, line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return softIRQStat, total, nil 
@@ -201,34 +201,34 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -251,7 +251,7 @@ func parseStat(r io.Reader, fileName string) (Stat, error) { } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("%s: couldn't parse %q: %w", ErrFileParse, fileName, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go index fa00f555..65fec834 100644 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ b/vendor/github.com/prometheus/procfs/swaps.go @@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) { swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) + return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("%s: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) + return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = strconv.Atoi(swapFields[4]) if err != nil { - 
return nil, fmt.Errorf("%s: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) + return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go index df2215ec..80e0e947 100644 --- a/vendor/github.com/prometheus/procfs/thread.go +++ b/vendor/github.com/prometheus/procfs/thread.go @@ -45,7 +45,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("%s: could not read %q: %w", ErrFileRead, d.Name(), err) + return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err) } t := Procs{} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go index ce5fefa5..e54d94b0 100644 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) func (fs FS) Zoneinfo() ([]Zoneinfo, error) { data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("%s: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("%s: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go b/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go new file mode 100644 index 00000000..2c28b172 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go @@ -0,0 +1,67 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package exemplar + +import "github.com/prometheus/prometheus/model/labels" + +// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of +// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 +// UTF-8 characters." +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars +const ExemplarMaxLabelSetLength = 128 + +// Exemplar is additional information associated with a time series. +type Exemplar struct { + Labels labels.Labels `json:"labels"` + Value float64 `json:"value"` + Ts int64 `json:"timestamp"` + HasTs bool +} + +type QueryResult struct { + SeriesLabels labels.Labels `json:"seriesLabels"` + Exemplars []Exemplar `json:"exemplars"` +} + +// Equals compares if the exemplar e is the same as e2. Note that if HasTs is false for +// both exemplars then the timestamps will be ignored for the comparison. 
This can come up +// when an exemplar is exported without its own timestamp, in which case the scrape timestamp +// is assigned to the Ts field. However, we still want to treat the same exemplar, scraped without +// an exported timestamp, as a duplicate of itself for each subsequent scrape. +func (e Exemplar) Equals(e2 Exemplar) bool { + if !labels.Equal(e.Labels, e2.Labels) { + return false + } + + if (e.HasTs || e2.HasTs) && e.Ts != e2.Ts { + return false + } + + return e.Value == e2.Value +} + +// Compare first timestamps, then values, then labels. +func Compare(a, b Exemplar) int { + if a.Ts < b.Ts { + return -1 + } else if a.Ts > b.Ts { + return 1 + } + if a.Value < b.Value { + return -1 + } else if a.Value > b.Value { + return 1 + } + return labels.Compare(a.Labels, b.Labels) +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go new file mode 100644 index 00000000..a6ad47ac --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -0,0 +1,1359 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "math" + "strings" +) + +// FloatHistogram is similar to Histogram but uses float64 for all +// counts. Additionally, bucket counts are absolute and not deltas. +// +// A FloatHistogram is needed by PromQL to handle operations that might result +// in fractional counts. Since the counts in a histogram are unlikely to be too +// large to be represented precisely by a float64, a FloatHistogram can also be +// used to represent a histogram with integer counts and thus serves as a more +// generalized representation. +type FloatHistogram struct { + // Counter reset information. + CounterResetHint CounterResetHint + // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in + // each case, and then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by + // the CustomValues field. + Schema int32 + // Width of the zero bucket. + ZeroThreshold float64 + // Observations falling into the zero bucket. Must be zero or positive. + ZeroCount float64 + // Total number of observations. Must be zero or positive. + Count float64 + // Sum of observations. This is also used as the stale marker. + Sum float64 + // Spans for positive and negative buckets (see Span below). + PositiveSpans, NegativeSpans []Span + // Observation counts in buckets. Each represents an absolute count and + // must be zero or positive. + PositiveBuckets, NegativeBuckets []float64 + // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. + // This slice is interned, to be treated as immutable and copied by reference.
+ // These numbers should be strictly increasing. This field is only used when the + // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans + // and NegativeBuckets fields are not used in that case. + CustomValues []float64 +} + +func (h *FloatHistogram) UsesCustomBuckets() bool { + return IsCustomBucketsSchema(h.Schema) +} + +// Copy returns a deep copy of the Histogram. +func (h *FloatHistogram) Copy() *FloatHistogram { + c := FloatHistogram{ + CounterResetHint: h.CounterResetHint, + Schema: h.Schema, + Count: h.Count, + Sum: h.Sum, + } + + if h.UsesCustomBuckets() { + if len(h.CustomValues) != 0 { + c.CustomValues = make([]float64, len(h.CustomValues)) + copy(c.CustomValues, h.CustomValues) + } + } else { + c.ZeroThreshold = h.ZeroThreshold + c.ZeroCount = h.ZeroCount + + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]float64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + } + + if len(h.PositiveSpans) != 0 { + c.PositiveSpans = make([]Span, len(h.PositiveSpans)) + copy(c.PositiveSpans, h.PositiveSpans) + } + if len(h.PositiveBuckets) != 0 { + c.PositiveBuckets = make([]float64, len(h.PositiveBuckets)) + copy(c.PositiveBuckets, h.PositiveBuckets) + } + + return &c +} + +// CopyTo makes a deep copy into the given FloatHistogram. +// The destination object has to be a non-nil pointer. +func (h *FloatHistogram) CopyTo(to *FloatHistogram) { + to.CounterResetHint = h.CounterResetHint + to.Schema = h.Schema + to.Count = h.Count + to.Sum = h.Sum + + if h.UsesCustomBuckets() { + to.ZeroThreshold = 0 + to.ZeroCount = 0 + + to.NegativeSpans = clearIfNotNil(to.NegativeSpans) + to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) + + to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) + copy(to.CustomValues, h.CustomValues) + } else { + to.ZeroThreshold = h.ZeroThreshold + to.ZeroCount = h.ZeroCount + + to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) + copy(to.NegativeSpans, h.NegativeSpans) + + to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) + copy(to.NegativeBuckets, h.NegativeBuckets) + + to.CustomValues = clearIfNotNil(to.CustomValues) + } + + to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) + copy(to.PositiveSpans, h.PositiveSpans) + + to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) + copy(to.PositiveBuckets, h.PositiveBuckets) +} + +// CopyToSchema works like Copy, but the returned deep copy has the provided +// target schema, which must be ≤ the original schema (i.e. it must have a lower +// resolution). This method panics if a custom buckets schema is used in the +// receiving FloatHistogram or as the provided targetSchema. +func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { + if targetSchema == h.Schema { + // Fast path. 
+ return h.Copy() + } + if h.UsesCustomBuckets() { + panic(fmt.Errorf("cannot reduce resolution to %d when there are custom buckets", targetSchema)) + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } + if targetSchema > h.Schema { + panic(fmt.Errorf("cannot copy from schema %d to %d", h.Schema, targetSchema)) + } + c := FloatHistogram{ + Schema: targetSchema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.ZeroCount, + Count: h.Count, + Sum: h.Sum, + } + + c.PositiveSpans, c.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, false) + c.NegativeSpans, c.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, false) + + return &c +} + +// String returns a string representation of the Histogram. +func (h *FloatHistogram) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "{count:%g, sum:%g", h.Count, h.Sum) + + var nBuckets []Bucket[float64] + for it := h.NegativeBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + nBuckets = append(nBuckets, it.At()) + } + } + for i := len(nBuckets) - 1; i >= 0; i-- { + fmt.Fprintf(&sb, ", %s", nBuckets[i].String()) + } + + if h.ZeroCount != 0 { + fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String()) + } + + for it := h.PositiveBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + fmt.Fprintf(&sb, ", %s", bucket.String()) + } + } + + sb.WriteRune('}') + return sb.String() +} + +// TestExpression returns the string representation of this histogram as it is used in the internal PromQL testing +// framework as well as in promtool rules unit tests. +// The syntax is described in https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series +func (h *FloatHistogram) TestExpression() string { + var res []string + m := h.Copy() + + m.Compact(math.MaxInt) // Compact to reduce the number of positive and negative spans to 1. + + if m.Schema != 0 { + res = append(res, fmt.Sprintf("schema:%d", m.Schema)) + } + if m.Count != 0 { + res = append(res, fmt.Sprintf("count:%g", m.Count)) + } + if m.Sum != 0 { + res = append(res, fmt.Sprintf("sum:%g", m.Sum)) + } + if m.ZeroCount != 0 { + res = append(res, fmt.Sprintf("z_bucket:%g", m.ZeroCount)) + } + if m.ZeroThreshold != 0 { + res = append(res, fmt.Sprintf("z_bucket_w:%g", m.ZeroThreshold)) + } + if m.UsesCustomBuckets() { + res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) + } + + switch m.CounterResetHint { + case UnknownCounterReset: + // Unknown is the default, don't add anything. 
+ case CounterReset: + res = append(res, "counter_reset_hint:reset") + case NotCounterReset: + res = append(res, "counter_reset_hint:not_reset") + case GaugeType: + res = append(res, "counter_reset_hint:gauge") + } + + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { + if len(spans) > 1 { + panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) + } + for _, span := range spans { + if span.Offset != 0 { + res = append(res, fmt.Sprintf("%s:%d", offsetKey, span.Offset)) + } + } + + var bucketStr []string + for _, bucket := range buckets { + bucketStr = append(bucketStr, fmt.Sprintf("%g", bucket)) + } + if len(bucketStr) > 0 { + res = append(res, fmt.Sprintf("%s:[%s]", bucketsKey, strings.Join(bucketStr, " "))) + } + return res + } + res = addBuckets("positive", "buckets", "offset", m.PositiveBuckets, m.PositiveSpans) + res = addBuckets("negative", "n_buckets", "n_offset", m.NegativeBuckets, m.NegativeSpans) + return "{{" + strings.Join(res, " ") + "}}" +} + +// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets. +func (h *FloatHistogram) ZeroBucket() Bucket[float64] { + if h.UsesCustomBuckets() { + panic("histograms with custom buckets have no zero bucket") + } + return Bucket[float64]{ + Lower: -h.ZeroThreshold, + Upper: h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: h.ZeroCount, + // Index is irrelevant for the zero bucket. + } +} + +// Mul multiplies the FloatHistogram by the provided factor, i.e. it scales all +// bucket counts including the zero bucket and the count and the sum of +// observations. The bucket layout stays the same. This method changes the +// receiving histogram directly (rather than acting on a copy). It returns a +// pointer to the receiving histogram for convenience. +func (h *FloatHistogram) Mul(factor float64) *FloatHistogram { + h.ZeroCount *= factor + h.Count *= factor + h.Sum *= factor + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] *= factor + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] *= factor + } + return h +} + +// Div works like Mul but divides instead of multiplies. +// When dividing by 0, everything will be set to Inf. +func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { + h.ZeroCount /= scalar + h.Count /= scalar + h.Sum /= scalar + // Division by zero removes all buckets. + if scalar == 0 { + h.PositiveBuckets = nil + h.NegativeBuckets = nil + h.PositiveSpans = nil + h.NegativeSpans = nil + return h + } + for i := range h.PositiveBuckets { + h.PositiveBuckets[i] /= scalar + } + for i := range h.NegativeBuckets { + h.NegativeBuckets[i] /= scalar + } + return h +} + +// Add adds the provided other histogram to the receiving histogram. Count, Sum, +// and buckets from the other histogram are added to the corresponding +// components of the receiving histogram. Buckets in the other histogram that do +// not exist in the receiving histogram are inserted into the latter. The +// resulting histogram might have buckets with a population of zero or directly +// adjacent spans (offset=0). To normalize those, call the Compact method. +// +// The method reconciles differences in the zero threshold and in the schema, and +// changes them if needed. The other histogram will not be modified in any case. +// Adding is currently only supported between 2 exponential histograms, or between +// 2 custom buckets histograms with the exact same custom bounds. 
+// +// This method returns a pointer to the receiving histogram for convenience. +func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { + if h.UsesCustomBuckets() != other.UsesCustomBuckets() { + return nil, ErrHistogramsIncompatibleSchema + } + if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) { + return nil, ErrHistogramsIncompatibleBounds + } + + switch { + case other.CounterResetHint == h.CounterResetHint: + // Adding apples to apples, all good. No need to change anything. + case h.CounterResetHint == GaugeType: + // Adding something else to a gauge. That's probably OK. Outcome is a gauge. + // Nothing to do since the receiver is already marked as gauge. + case other.CounterResetHint == GaugeType: + // Similar to before, but this time the receiver is "something else" and we have to change it to gauge. + h.CounterResetHint = GaugeType + case h.CounterResetHint == UnknownCounterReset: + // With the receiver's CounterResetHint being "unknown", this could still be legitimate + // if the caller knows what they are doing. Outcome is then again "unknown". + // No need to do anything since the receiver's CounterResetHint is already "unknown". + case other.CounterResetHint == UnknownCounterReset: + // Similar to before, but now we have to set the receiver's CounterResetHint to "unknown". + h.CounterResetHint = UnknownCounterReset + default: + // All other cases shouldn't actually happen. + // They are a direct collision of CounterReset and NotCounterReset. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. + h.CounterResetHint = UnknownCounterReset + // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place + } + + if !h.UsesCustomBuckets() { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount += otherZeroCount + } + h.Count += other.Count + h.Sum += other.Sum + + var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + otherPositiveSpans = other.PositiveSpans + otherPositiveBuckets = other.PositiveBuckets + ) + + if h.UsesCustomBuckets() { + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + return h, nil + } + + var ( + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherNegativeSpans = other.NegativeSpans + otherNegativeBuckets = other.NegativeBuckets + ) + + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + + case other.Schema > h.Schema: + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) + } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + + return h, nil +} + +// Sub works like Add but subtracts the other histogram. 
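+//
+// A hedged usage sketch (illustrative, not part of the upstream docs): both
+// Add and Sub modify the receiver in place and return it for convenience, so
+// take a Copy first when the original must be preserved:
+//
+//	sum, err := a.Copy().Add(b)  // a is left intact
+//	diff, err := a.Copy().Sub(b)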
+func (h *FloatHistogram) Sub(other *FloatHistogram) (*FloatHistogram, error) { + if h.UsesCustomBuckets() != other.UsesCustomBuckets() { + return nil, ErrHistogramsIncompatibleSchema + } + if h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, other.CustomValues) { + return nil, ErrHistogramsIncompatibleBounds + } + + if !h.UsesCustomBuckets() { + otherZeroCount := h.reconcileZeroBuckets(other) + h.ZeroCount -= otherZeroCount + } + h.Count -= other.Count + h.Sum -= other.Sum + + var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + otherPositiveSpans = other.PositiveSpans + otherPositiveBuckets = other.PositiveBuckets + ) + + if h.UsesCustomBuckets() { + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + return h, nil + } + + var ( + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + otherNegativeSpans = other.NegativeSpans + otherNegativeBuckets = other.NegativeBuckets + ) + + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + case other.Schema > h.Schema: + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) + } + + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + + return h, nil +} + +// Equals returns true if the given float histogram matches exactly. +// Exact match is when there are no new buckets (even empty) and no missing buckets, +// and all the bucket values match. Spans can have different empty length spans in between, +// but they must represent the same bucket layout to match. +// Sum, Count, ZeroCount and bucket values are compared based on their bit patterns +// because this method is about data equality rather than mathematical equality. +// We ignore fields that are not used based on the exponential / custom buckets schema, +// but check fields where differences may cause unintended behaviour even if they are not +// supposed to be used according to the schema. 
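+//
+// A hedged illustration (not part of the upstream docs): because bit patterns
+// are compared, NaN values match themselves here, unlike with the == operator
+// on float64:
+//
+//	a := &FloatHistogram{Sum: math.NaN()}
+//	b := &FloatHistogram{Sum: math.NaN()}
+//	equal := a.Equals(b) // true: identical NaN bit patterns compare equal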
+func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { + if h2 == nil { + return false + } + + if h.Schema != h2.Schema || + math.Float64bits(h.Count) != math.Float64bits(h2.Count) || + math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { + return false + } + + if h.UsesCustomBuckets() { + if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) { + return false + } + } + + if h.ZeroThreshold != h2.ZeroThreshold || + math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) { + return false + } + + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + if !FloatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { + return false + } + if !FloatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + return false + } + + return true +} + +// Size returns the total size of the FloatHistogram, which includes the size of the pointer +// to FloatHistogram, all its fields, and all elements contained in slices. +// NOTE: this is only valid for 64 bit architectures. +func (h *FloatHistogram) Size() int { + // Size of each slice separately. + posSpanSize := len(h.PositiveSpans) * 8 // 8 bytes (int32 + uint32). + negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32). + posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64). + negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64). + customBoundSize := len(h.CustomValues) * 8 // 8 bytes (float64). + + // Total size of the struct. + + // fh is 8 bytes. + // fh.CounterResetHint is 4 bytes (1 byte bool + 3 bytes padding). + // fh.Schema is 4 bytes. + // fh.ZeroThreshold is 8 bytes. + // fh.ZeroCount is 8 bytes. + // fh.Count is 8 bytes. + // fh.Sum is 8 bytes. + // fh.PositiveSpans is 24 bytes. + // fh.NegativeSpans is 24 bytes. + // fh.PositiveBuckets is 24 bytes. + // fh.NegativeBuckets is 24 bytes. + // fh.CustomValues is 24 bytes. + structSize := 168 + + return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize + customBoundSize +} + +// Compact eliminates empty buckets at the beginning and end of each span, then +// merges spans that are consecutive or at most maxEmptyBuckets apart, and +// finally splits spans that contain more consecutive empty buckets than +// maxEmptyBuckets. (The actual implementation might do something more efficient +// but with the same result.) The compaction happens "in place" in the +// receiving histogram, but a pointer to it is returned for convenience. +// +// The ideal value for maxEmptyBuckets depends on circumstances. The motivation +// to set maxEmptyBuckets > 0 is the assumption that it is less overhead to +// represent very few empty buckets explicitly within one span than cutting the +// one span into two to treat the empty buckets as a gap between the two spans, +// both in terms of storage requirement as well as in terms of encoding and +// decoding effort. However, the tradeoffs are subtle. For one, they are +// different in the exposition format vs. in a TSDB chunk vs. for the in-memory +// representation as Go types. In the TSDB, as an additional aspect, the span +// layout is only stored once per chunk, while many histograms with that same +// chunk layout are then only stored with their buckets (so that even a single +// empty bucket will be stored many times). +// +// For the Go types, an additional Span takes 8 bytes. Similarly, an additional +// bucket takes 8 bytes.
Therefore, with a single separating empty bucket, both +// options have the same storage requirement, but the single-span solution is +// easier to iterate through. Still, the safest bet is to use maxEmptyBuckets==0 +// and only use a larger number if you know what you are doing. +func (h *FloatHistogram) Compact(maxEmptyBuckets int) *FloatHistogram { + h.PositiveBuckets, h.PositiveSpans = compactBuckets( + h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, false, + ) + h.NegativeBuckets, h.NegativeSpans = compactBuckets( + h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, false, + ) + return h +} + +// DetectReset returns true if the receiving histogram is missing any buckets +// that have a non-zero population in the provided previous histogram. It also +// returns true if any count (in any bucket, in the zero count, or in the count +// of observations, but NOT the sum of observations) is smaller in the receiving +// histogram compared to the previous histogram. Otherwise, it returns false. +// +// This method will shortcut to true if a CounterReset is detected, and shortcut +// to false if NotCounterReset is detected. Otherwise it will do the work to detect +// a reset. +// +// Special behavior in case the Schema or the ZeroThreshold are not the same in +// both histograms: +// +// - A decrease of the ZeroThreshold or an increase of the Schema (i.e. an +// increase of resolution) can only happen together with a reset. Thus, the +// method returns true in either case. +// +// - Upon an increase of the ZeroThreshold, the buckets in the previous +// histogram that fall within the new ZeroThreshold are added to the ZeroCount +// of the previous histogram (without mutating the provided previous +// histogram). The scenario that a populated bucket of the previous histogram +// is partially within, partially outside of the new ZeroThreshold, can only +// happen together with a counter reset and therefore shortcuts to returning +// true. +// +// - Upon a decrease of the Schema, the buckets of the previous histogram are +// merged so that they match the new, lower-resolution schema (again without +// mutating the provided previous histogram). +func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { + if h.CounterResetHint == CounterReset { + return true + } + if h.CounterResetHint == NotCounterReset { + return false + } + // In all other cases of CounterResetHint (UnknownCounterReset and GaugeType), + // we go on as we would otherwise, for reasons explained below. + // + // If the CounterResetHint is UnknownCounterReset, we do not know yet if this histogram comes + // with a counter reset. Therefore, we have to do all the detailed work to find out if there + // is a counter reset or not. + // We do the same if the CounterResetHint is GaugeType, which should not happen, but PromQL still + // allows the user to apply functions to gauge histograms that are only meant for counter histograms. + // In this case, we treat the gauge histograms as counter histograms. A warning should be returned + // to the user in this case. + if h.Count < previous.Count { + return true + } + if h.UsesCustomBuckets() != previous.UsesCustomBuckets() || (h.UsesCustomBuckets() && !FloatBucketsMatch(h.CustomValues, previous.CustomValues)) { + // Mark that something has changed or that the application has been restarted. However, this does + // not matter so much since the change in schema will be handled directly in the chunks and PromQL + // functions. 
+ return true + } + if h.Schema > previous.Schema { + return true + } + if h.ZeroThreshold < previous.ZeroThreshold { + // ZeroThreshold decreased. + return true + } + previousZeroCount, newThreshold := previous.zeroCountForLargerThreshold(h.ZeroThreshold) + if newThreshold != h.ZeroThreshold { + // ZeroThreshold is within a populated bucket in previous + // histogram. + return true + } + if h.ZeroCount < previousZeroCount { + return true + } + currIt := h.floatBucketIterator(true, h.ZeroThreshold, h.Schema) + prevIt := previous.floatBucketIterator(true, h.ZeroThreshold, h.Schema) + if detectReset(&currIt, &prevIt) { + return true + } + currIt = h.floatBucketIterator(false, h.ZeroThreshold, h.Schema) + prevIt = previous.floatBucketIterator(false, h.ZeroThreshold, h.Schema) + return detectReset(&currIt, &prevIt) +} + +func detectReset(currIt, prevIt *floatBucketIterator) bool { + if !prevIt.Next() { + return false // If no buckets in previous histogram, nothing can be reset. + } + prevBucket := prevIt.strippedAt() + if !currIt.Next() { + // No bucket in current, but at least one in previous + // histogram. Check if any of those are non-zero, in which case + // this is a reset. + for { + if prevBucket.count != 0 { + return true + } + if !prevIt.Next() { + return false + } + } + } + currBucket := currIt.strippedAt() + for { + // Forward currIt until we find the bucket corresponding to prevBucket. + for currBucket.index < prevBucket.index { + if !currIt.Next() { + // Reached end of currIt early, therefore + // previous histogram has a bucket that the + // current one does not have. Unless all + // remaining buckets in the previous histogram + // are unpopulated, this is a reset. + for { + if prevBucket.count != 0 { + return true + } + if !prevIt.Next() { + return false + } + } + } + currBucket = currIt.strippedAt() + } + if currBucket.index > prevBucket.index { + // Previous histogram has a bucket the current one does + // not have. If it's populated, it's a reset. + if prevBucket.count != 0 { + return true + } + } else { + // We have reached corresponding buckets in both iterators. + // We can finally compare the counts. + if currBucket.count < prevBucket.count { + return true + } + } + if !prevIt.Next() { + // Reached end of prevIt without finding offending buckets. + return false + } + prevBucket = prevIt.strippedAt() + } +} + +// PositiveBucketIterator returns a BucketIterator to iterate over all positive +// buckets in ascending order (starting next to the zero bucket and going up). +func (h *FloatHistogram) PositiveBucketIterator() BucketIterator[float64] { + it := h.floatBucketIterator(true, 0, h.Schema) + return &it +} + +// NegativeBucketIterator returns a BucketIterator to iterate over all negative +// buckets in descending order (starting next to the zero bucket and going +// down). +func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] { + it := h.floatBucketIterator(false, 0, h.Schema) + return &it +} + +// PositiveReverseBucketIterator returns a BucketIterator to iterate over all +// positive buckets in descending order (starting at the highest bucket and +// going down towards the zero bucket). 
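+//
+// A minimal iteration sketch (hypothetical usage, for illustration):
+//
+//	for it := h.PositiveReverseBucketIterator(); it.Next(); {
+//		b := it.At()
+//		fmt.Printf("(%g, %g]: %g\n", b.Lower, b.Upper, b.Count)
+//	}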
+func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { + it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues) + return &it +} + +// NegativeReverseBucketIterator returns a BucketIterator to iterate over all +// negative buckets in ascending order (starting at the lowest bucket and going +// up towards the zero bucket). +func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] { + it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil) + return &it +} + +// AllBucketIterator returns a BucketIterator to iterate over all negative, +// zero, and positive buckets in ascending order (starting at the lowest bucket +// and going up). If the highest negative bucket or the lowest positive bucket +// overlap with the zero bucket, their upper or lower boundary, respectively, is +// set to the zero threshold. +func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { + return &allFloatBucketIterator{ + h: h, + leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil), + rightIter: h.floatBucketIterator(true, 0, h.Schema), + state: -1, + } +} + +// AllReverseBucketIterator returns a BucketIterator to iterate over all negative, +// zero, and positive buckets in descending order (starting at the highest bucket +// and going down). If the highest negative bucket or the lowest positive bucket +// overlap with the zero bucket, their upper or lower boundary, respectively, is +// set to the zero threshold. +func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { + return &allFloatBucketIterator{ + h: h, + leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues), + rightIter: h.floatBucketIterator(false, 0, h.Schema), + state: -1, + } +} + +// Validate validates consistency between span and bucket slices. Also, buckets are checked +// against negative values. We check to make sure there are no unexpected fields or field values +// based on the exponential / custom buckets schema. +// We do not check for h.Count being at least as large as the sum of the +// counts in the buckets because floating point precision issues can +// create false positives here.
+func (h *FloatHistogram) Validate() error { + var nCount, pCount float64 + if h.UsesCustomBuckets() { + if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("custom buckets: %w", err) + } + if h.ZeroCount != 0 { + return fmt.Errorf("custom buckets: must have zero count of 0") + } + if h.ZeroThreshold != 0 { + return fmt.Errorf("custom buckets: must have zero threshold of 0") + } + if len(h.NegativeSpans) > 0 { + return fmt.Errorf("custom buckets: must not have negative spans") + } + if len(h.NegativeBuckets) > 0 { + return fmt.Errorf("custom buckets: must not have negative buckets") + } + } else { + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + if h.CustomValues != nil { + return fmt.Errorf("histogram with exponential schema must not have custom bounds") + } + } + err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false) + if err != nil { + return fmt.Errorf("positive side: %w", err) + } + + return nil +} + +// zeroCountForLargerThreshold returns what the histogram's zero count would be +// if the ZeroThreshold had the provided larger (or equal) value. If the +// provided value is less than the histogram's ZeroThreshold, the method panics. +// If the largerThreshold ends up within a populated bucket of the histogram, it +// is adjusted upwards to the lower limit of that bucket (all in terms of +// absolute values) and that bucket's count is included in the returned +// count. The adjusted threshold is returned, too. +func (h *FloatHistogram) zeroCountForLargerThreshold(largerThreshold float64) (count, threshold float64) { + // Fast path. + if largerThreshold == h.ZeroThreshold { + return h.ZeroCount, largerThreshold + } + if largerThreshold < h.ZeroThreshold { + panic(fmt.Errorf("new threshold %f is less than old threshold %f", largerThreshold, h.ZeroThreshold)) + } +outer: + for { + count = h.ZeroCount + i := h.PositiveBucketIterator() + for i.Next() { + b := i.At() + if b.Lower >= largerThreshold { + break + } + count += b.Count // Bucket to be merged into zero bucket. + if b.Upper > largerThreshold { + // New threshold ended up within a bucket. if it's + // populated, we need to adjust largerThreshold before + // we are done here. + if b.Count != 0 { + largerThreshold = b.Upper + } + break + } + } + i = h.NegativeBucketIterator() + for i.Next() { + b := i.At() + if b.Upper <= -largerThreshold { + break + } + count += b.Count // Bucket to be merged into zero bucket. + if b.Lower < -largerThreshold { + // New threshold ended up within a bucket. If + // it's populated, we need to adjust + // largerThreshold and have to redo the whole + // thing because the treatment of the positive + // buckets is invalid now. + if b.Count != 0 { + largerThreshold = -b.Lower + continue outer + } + break + } + } + return count, largerThreshold + } +} + +// trimBucketsInZeroBucket removes all buckets that are within the zero +// bucket. It assumes that the zero threshold is at a bucket boundary and that +// the counts in the buckets to remove are already part of the zero count. 
+func (h *FloatHistogram) trimBucketsInZeroBucket() { + i := h.PositiveBucketIterator() + bucketsIdx := 0 + for i.Next() { + b := i.At() + if b.Lower >= h.ZeroThreshold { + break + } + h.PositiveBuckets[bucketsIdx] = 0 + bucketsIdx++ + } + i = h.NegativeBucketIterator() + bucketsIdx = 0 + for i.Next() { + b := i.At() + if b.Upper <= -h.ZeroThreshold { + break + } + h.NegativeBuckets[bucketsIdx] = 0 + bucketsIdx++ + } + // We are abusing Compact to trim the buckets set to zero + // above. Premature compacting could cause additional cost, but this + // code path is probably rarely used anyway. + h.Compact(0) +} + +// reconcileZeroBuckets finds a zero bucket large enough to include the zero +// buckets of both histograms (the receiving histogram and the other histogram) +// with a zero threshold that is not within a populated bucket in either +// histogram. This method modifies the receiving histogram accordingly, but +// leaves the other histogram as is. Instead, it returns the zero count the +// other histogram would have if it were modified. +func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { + otherZeroCount := other.ZeroCount + otherZeroThreshold := other.ZeroThreshold + + for otherZeroThreshold != h.ZeroThreshold { + if h.ZeroThreshold > otherZeroThreshold { + otherZeroCount, otherZeroThreshold = other.zeroCountForLargerThreshold(h.ZeroThreshold) + } + if otherZeroThreshold > h.ZeroThreshold { + h.ZeroCount, h.ZeroThreshold = h.zeroCountForLargerThreshold(otherZeroThreshold) + h.trimBucketsInZeroBucket() + } + } + return otherZeroCount +} + +// floatBucketIterator is a low-level constructor for bucket iterators. +// +// If positive is true, the returned iterator iterates through the positive +// buckets, otherwise through the negative buckets. +// +// Only for exponential schemas, if absoluteStartValue is < the lowest absolute +// value of any upper bucket boundary, the iterator starts with the first bucket. +// Otherwise, it will skip all buckets with an absolute value of their upper boundary ≤ +// absoluteStartValue. For custom bucket schemas, absoluteStartValue is ignored and +// no buckets are skipped. +// +// targetSchema must be ≤ the schema of FloatHistogram (and of course within the +// legal values for schemas in general). The buckets are merged to match the +// targetSchema prior to iterating (without mutating FloatHistogram), but custom buckets +// schemas cannot be merged with other schemas. 
+func (h *FloatHistogram) floatBucketIterator( + positive bool, absoluteStartValue float64, targetSchema int32, +) floatBucketIterator { + if h.UsesCustomBuckets() && targetSchema != h.Schema { + panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema")) + } + if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) { + panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema")) + } + if targetSchema > h.Schema { + panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) + } + i := floatBucketIterator{ + baseBucketIterator: baseBucketIterator[float64, float64]{ + schema: h.Schema, + positive: positive, + }, + targetSchema: targetSchema, + absoluteStartValue: absoluteStartValue, + boundReachedStartValue: absoluteStartValue == 0, + } + if positive { + i.spans = h.PositiveSpans + i.buckets = h.PositiveBuckets + i.customValues = h.CustomValues + } else { + i.spans = h.NegativeSpans + i.buckets = h.NegativeBuckets + } + return i +} + +// reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. +func newReverseFloatBucketIterator( + spans []Span, buckets []float64, schema int32, positive bool, customValues []float64, +) reverseFloatBucketIterator { + r := reverseFloatBucketIterator{ + baseBucketIterator: baseBucketIterator[float64, float64]{ + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + customValues: customValues, + }, + } + + r.spansIdx = len(r.spans) - 1 + r.bucketsIdx = len(r.buckets) - 1 + if r.spansIdx >= 0 { + r.idxInSpan = int32(r.spans[r.spansIdx].Length) - 1 + } + r.currIdx = 0 + for _, s := range r.spans { + r.currIdx += s.Offset + int32(s.Length) + } + + return r +} + +type floatBucketIterator struct { + baseBucketIterator[float64, float64] + + targetSchema int32 // targetSchema is the schema to merge to and must be ≤ schema. + origIdx int32 // The bucket index within the original schema. + absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value. + + boundReachedStartValue bool // Has getBound reached absoluteStartValue already? +} + +func (i *floatBucketIterator) At() Bucket[float64] { + // Need to use i.targetSchema rather than i.baseBucketIterator.schema. + return i.baseBucketIterator.at(i.targetSchema) +} + +func (i *floatBucketIterator) Next() bool { + if i.spansIdx >= len(i.spans) { + return false + } + + if i.schema == i.targetSchema { + // Fast path for the common case. + span := i.spans[i.spansIdx] + if i.bucketsIdx == 0 { + // Seed origIdx for the first bucket. + i.currIdx = span.Offset + } else { + i.currIdx++ + } + + for i.idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We even handle pathologic spans of length 0 here. + i.idxInSpan = 0 + i.spansIdx++ + if i.spansIdx >= len(i.spans) { + return false + } + span = i.spans[i.spansIdx] + i.currIdx += span.Offset + } + + i.currCount = i.buckets[i.bucketsIdx] + i.idxInSpan++ + i.bucketsIdx++ + } else { + // Copy all of these into local variables so that we can forward to the + // next bucket and then roll back if needed. + origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan + span := i.spans[spansIdx] + firstPass := true + i.currCount = 0 + + mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. + for { + if i.bucketsIdx == 0 { + // Seed origIdx for the first bucket. 
+ origIdx = span.Offset + } else { + origIdx++ + } + for idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We even handle pathologic spans of length 0 here. + idxInSpan = 0 + spansIdx++ + if spansIdx >= len(i.spans) { + if firstPass { + return false + } + break mergeLoop + } + span = i.spans[spansIdx] + origIdx += span.Offset + } + currIdx := targetIdx(origIdx, i.schema, i.targetSchema) + switch { + case firstPass: + i.currIdx = currIdx + firstPass = false + case currIdx != i.currIdx: + // Reached next bucket in targetSchema. + // Do not actually forward to the next bucket, but break out. + break mergeLoop + } + i.currCount += i.buckets[i.bucketsIdx] + idxInSpan++ + i.bucketsIdx++ + i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan + if i.schema == i.targetSchema { + // Don't need to test the next bucket for mergeability + // if we have no schema change anyway. + break mergeLoop + } + } + } + + // Skip buckets before absoluteStartValue for exponential schemas. + // TODO(beorn7): Maybe do something more efficient than this recursive call. + if !i.boundReachedStartValue && IsExponentialSchema(i.targetSchema) && getBoundExponential(i.currIdx, i.targetSchema) <= i.absoluteStartValue { + return i.Next() + } + i.boundReachedStartValue = true + return true +} + +type reverseFloatBucketIterator struct { + baseBucketIterator[float64, float64] + idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection. +} + +func (i *reverseFloatBucketIterator) Next() bool { + i.currIdx-- + if i.bucketsIdx < 0 { + return false + } + + for i.idxInSpan < 0 { + // We have exhausted the current span and have to find a new + // one. We'll even handle pathologic spans of length 0. + i.spansIdx-- + i.idxInSpan = int32(i.spans[i.spansIdx].Length) - 1 + i.currIdx -= i.spans[i.spansIdx+1].Offset + } + + i.currCount = i.buckets[i.bucketsIdx] + i.bucketsIdx-- + i.idxInSpan-- + return true +} + +type allFloatBucketIterator struct { + h *FloatHistogram + leftIter reverseFloatBucketIterator + rightIter floatBucketIterator + // -1 means we are iterating negative buckets. + // 0 means it is time for the zero bucket. + // 1 means we are iterating positive buckets. + // Anything else means iteration is over. + state int8 + currBucket Bucket[float64] +} + +func (i *allFloatBucketIterator) Next() bool { + switch i.state { + case -1: + if i.leftIter.Next() { + i.currBucket = i.leftIter.At() + switch { + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: + i.currBucket.Upper = -i.h.ZeroThreshold + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: + i.currBucket.Lower = i.h.ZeroThreshold + } + return true + } + i.state = 0 + return i.Next() + case 0: + i.state = 1 + if i.h.ZeroCount > 0 { + i.currBucket = i.h.ZeroBucket() + return true + } + return i.Next() + case 1: + if i.rightIter.Next() { + i.currBucket = i.rightIter.At() + switch { + case i.currBucket.Lower > 0 && i.currBucket.Lower < i.h.ZeroThreshold: + i.currBucket.Lower = i.h.ZeroThreshold + case i.currBucket.Upper < 0 && i.currBucket.Upper > -i.h.ZeroThreshold: + i.currBucket.Upper = -i.h.ZeroThreshold + } + return true + } + i.state = 42 + return false + } + + return false +} + +func (i *allFloatBucketIterator) At() Bucket[float64] { + return i.currBucket +} + +// targetIdx returns the bucket index in the target schema for the given bucket +// index idx in the original schema. 
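+//
+// A worked example (following the formula below): dropping one schema level
+// halves the resolution, so for originSchema-targetSchema = 1 the mapping is
+// ((idx-1)>>1)+1, e.g. source indices 1,2 -> 1 and 3,4 -> 2; each target
+// bucket absorbs two adjacent source buckets.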
+func targetIdx(idx, originSchema, targetSchema int32) int32 { + return ((idx - 1) >> (originSchema - targetSchema)) + 1 +} + +// addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA, +// creating missing buckets in spansA/bucketsA as needed. +// It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA, +// although spansA/bucketsA might get modified by this function). +// All buckets must use the same provided schema. +// Buckets in spansB/bucketsB with an absolute upper limit ≤ threshold are ignored. +// If negative is true, the buckets in spansB/bucketsB are subtracted rather than added. +func addBuckets( + schema int32, threshold float64, negative bool, + spansA []Span, bucketsA []float64, + spansB []Span, bucketsB []float64, +) ([]Span, []float64) { + var ( + iSpan = -1 + iBucket = -1 + iInSpan int32 + indexA int32 + indexB int32 + bIdxB int + bucketB float64 + deltaIndex int32 + lowerThanThreshold = true + ) + + for _, spanB := range spansB { + indexB += spanB.Offset + for j := 0; j < int(spanB.Length); j++ { + if lowerThanThreshold && IsExponentialSchema(schema) && getBoundExponential(indexB, schema) <= threshold { + goto nextLoop + } + lowerThanThreshold = false + + bucketB = bucketsB[bIdxB] + if negative { + bucketB *= -1 + } + + if iSpan == -1 { + if len(spansA) == 0 || spansA[0].Offset > indexB { + // Add bucket before all others. + bucketsA = append(bucketsA, 0) + copy(bucketsA[1:], bucketsA) + bucketsA[0] = bucketB + if len(spansA) > 0 && spansA[0].Offset == indexB+1 { + spansA[0].Length++ + spansA[0].Offset-- + goto nextLoop + } + spansA = append(spansA, Span{}) + copy(spansA[1:], spansA) + spansA[0] = Span{Offset: indexB, Length: 1} + if len(spansA) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spansA[1].Offset -= indexB + 1 + } + goto nextLoop + } else if spansA[0].Offset == indexB { + // Just add to first bucket. + bucketsA[0] += bucketB + goto nextLoop + } + iSpan, iBucket, iInSpan = 0, 0, 0 + indexA = spansA[0].Offset + } + deltaIndex = indexB - indexA + for { + remainingInSpan := int32(spansA[iSpan].Length) - iInSpan + if deltaIndex < remainingInSpan { + // Bucket is in current span. + iBucket += int(deltaIndex) + iInSpan += deltaIndex + bucketsA[iBucket] += bucketB + break + } + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + bucketsA = append(bucketsA, 0) + copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) + bucketsA[iBucket] = bucketB + switch { + case deltaIndex == 0: + // Directly after previous span, extend previous span. + if iSpan < len(spansA) { + spansA[iSpan].Offset-- + } + iSpan-- + iInSpan = int32(spansA[iSpan].Length) + spansA[iSpan].Length++ + goto nextLoop + case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: + // Directly before next span, extend next span. + iInSpan = 0 + spansA[iSpan].Offset-- + spansA[iSpan].Length++ + goto nextLoop + default: + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spansA) { + spansA[iSpan].Offset -= deltaIndex + 1 + } + spansA = append(spansA, Span{}) + copy(spansA[iSpan+1:], spansA[iSpan:]) + spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} + goto nextLoop + } + } else { + // Try start of next span. 
+ deltaIndex -= spansA[iSpan].Offset + iInSpan = 0 + } + } + + nextLoop: + indexA = indexB + indexB++ + bIdxB++ + } + } + + return spansA, bucketsA +} + +func FloatBucketsMatch(b1, b2 []float64) bool { + if len(b1) != len(b2) { + return false + } + for i, b := range b1 { + if math.Float64bits(b) != math.Float64bits(b2[i]) { + return false + } + } + return true +} + +// ReduceResolution reduces the float histogram's spans, buckets into target schema. +// The target schema must be smaller than the current float histogram's schema. +// This will panic if the histogram has custom buckets or if the target schema is +// a custom buckets schema. +func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { + if h.UsesCustomBuckets() { + panic("cannot reduce resolution when there are custom buckets") + } + if IsCustomBucketsSchema(targetSchema) { + panic("cannot reduce resolution to custom buckets schema") + } + if targetSchema >= h.Schema { + panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) + } + + h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, true) + h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, true) + + h.Schema = targetSchema + return h +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go new file mode 100644 index 00000000..a36b58d0 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go @@ -0,0 +1,786 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package histogram + +import ( + "errors" + "fmt" + "math" + "strings" +) + +const ( + ExponentialSchemaMax int32 = 8 + ExponentialSchemaMin int32 = -4 + CustomBucketsSchema int32 = -53 +) + +var ( + ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") + ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") + ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") + ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") + ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") + ErrHistogramCustomBucketsMismatch = errors.New("histogram custom bounds are too few") + ErrHistogramCustomBucketsInvalid = errors.New("histogram custom bounds must be in strictly increasing order") + ErrHistogramCustomBucketsInfinite = errors.New("histogram custom bounds must be finite") + ErrHistogramsIncompatibleSchema = errors.New("cannot apply this operation on histograms with a mix of exponential and custom bucket schemas") + ErrHistogramsIncompatibleBounds = errors.New("cannot apply this operation on custom buckets histograms with different custom bounds") +) + +func IsCustomBucketsSchema(s int32) bool { + return s == CustomBucketsSchema +} + +func IsExponentialSchema(s int32) bool { + return s >= ExponentialSchemaMin && s <= ExponentialSchemaMax +} + +// BucketCount is a type constraint for the count in a bucket, which can be +// float64 (for type FloatHistogram) or uint64 (for type Histogram). +type BucketCount interface { + float64 | uint64 +} + +// InternalBucketCount is used internally by Histogram and FloatHistogram. The +// difference to the BucketCount above is that Histogram internally uses deltas +// between buckets rather than absolute counts (while FloatHistogram uses +// absolute counts directly). Go type parameters don't allow type +// specialization. Therefore, where special treatment of deltas between buckets +// vs. absolute counts is important, this information has to be provided as a +// separate boolean parameter "deltaBuckets". +type InternalBucketCount interface { + float64 | int64 +} + +// Bucket represents a bucket with lower and upper limit and the absolute count +// of samples in the bucket. It also specifies if each limit is inclusive or +// not. (Mathematically, inclusive limits create a closed interval, and +// non-inclusive limits an open interval.) +// +// To represent cumulative buckets, Lower is set to -Inf, and the Count is then +// cumulative (including the counts of all buckets for smaller values). +type Bucket[BC BucketCount] struct { + Lower, Upper float64 + LowerInclusive, UpperInclusive bool + Count BC + + // Index within schema. To easily compare buckets that share the same + // schema and sign (positive or negative). Irrelevant for the zero bucket. + Index int32 +} + +// strippedBucket is Bucket without bound values (which are expensive to calculate +// and not used in certain use cases). +type strippedBucket[BC BucketCount] struct { + count BC + index int32 +} + +// String returns a string representation of a Bucket, using the usual +// mathematical notation of '['/']' for inclusive bounds and '('/')' for +// non-inclusive bounds. 
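+// For example, a positive bucket covering (0.25,0.5] with a count of 3 is
+// rendered as "(0.25,0.5]:3".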
+func (b Bucket[BC]) String() string { + var sb strings.Builder + if b.LowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if b.UpperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +// BucketIterator iterates over the buckets of a Histogram, returning decoded +// buckets. +type BucketIterator[BC BucketCount] interface { + // Next advances the iterator by one. + Next() bool + // At returns the current bucket. + At() Bucket[BC] +} + +// baseBucketIterator provides a struct that is shared by most BucketIterator +// implementations, together with an implementation of the At method. This +// iterator can be embedded in full implementations of BucketIterator to save on +// code replication. +type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { + schema int32 + spans []Span + buckets []IBC + + positive bool // Whether this is for positive buckets. + + spansIdx int // Current span within spans slice. + idxInSpan uint32 // Index in the current span. 0 <= idxInSpan < span.Length. + bucketsIdx int // Current bucket within buckets slice. + + currCount IBC // Count in the current bucket. + currIdx int32 // The actual bucket index. + + customValues []float64 // Bounds (usually upper) for histograms with custom buckets. +} + +func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] { + return b.at(b.schema) +} + +// at is an internal version of the exported At to enable using a different schema. +func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { + bucket := Bucket[BC]{ + Count: BC(b.currCount), + Index: b.currIdx, + } + if b.positive { + bucket.Upper = getBound(b.currIdx, schema, b.customValues) + bucket.Lower = getBound(b.currIdx-1, schema, b.customValues) + } else { + bucket.Lower = -getBound(b.currIdx, schema, b.customValues) + bucket.Upper = -getBound(b.currIdx-1, schema, b.customValues) + } + if IsCustomBucketsSchema(schema) { + bucket.LowerInclusive = b.currIdx == 0 + bucket.UpperInclusive = true + } else { + bucket.LowerInclusive = bucket.Lower < 0 + bucket.UpperInclusive = bucket.Upper > 0 + } + return bucket +} + +// strippedAt returns current strippedBucket (which lacks bucket bounds but is cheaper to compute). +func (b *baseBucketIterator[BC, IBC]) strippedAt() strippedBucket[BC] { + return strippedBucket[BC]{ + count: BC(b.currCount), + index: b.currIdx, + } +} + +// compactBuckets is a generic function used by both Histogram.Compact and +// FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are +// deltas. Set it to false if the buckets contain absolute counts. +func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmptyBuckets int, deltaBuckets bool) ([]IBC, []Span) { + // Fast path: If there are no empty buckets AND no offset in any span is + // <= maxEmptyBuckets AND no span has length 0, there is nothing to do and we can return + // immediately. We check that first because it's cheap and presumably + // common. 
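+	// As a worked example of the compaction below: with maxEmptyBuckets=2,
+	// absolute buckets [3, 0, 0, 0, 5] in a single span {Offset: 0, Length: 5}
+	// compact to buckets [3, 5] and spans [{0, 1}, {3, 1}], because the run of
+	// three empty buckets exceeds maxEmptyBuckets. With maxEmptyBuckets=3, the
+	// input would be returned unchanged.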
+ nothingToDo := true + var currentBucketAbsolute IBC + for _, bucket := range buckets { + if deltaBuckets { + currentBucketAbsolute += bucket + } else { + currentBucketAbsolute = bucket + } + if currentBucketAbsolute == 0 { + nothingToDo = false + break + } + } + if nothingToDo { + for _, span := range spans { + if int(span.Offset) <= maxEmptyBuckets || span.Length == 0 { + nothingToDo = false + break + } + } + if nothingToDo { + return buckets, spans + } + } + + var iBucket, iSpan int + var posInSpan uint32 + currentBucketAbsolute = 0 + + // Helper function. + emptyBucketsHere := func() int { + i := 0 + abs := currentBucketAbsolute + for uint32(i)+posInSpan < spans[iSpan].Length && abs == 0 { + i++ + if i+iBucket >= len(buckets) { + break + } + abs = buckets[i+iBucket] + } + return i + } + + // Merge spans with zero-offset to avoid special cases later. + if len(spans) > 1 { + for i, span := range spans[1:] { + if span.Offset == 0 { + spans[iSpan].Length += span.Length + continue + } + iSpan++ + if i+1 != iSpan { + spans[iSpan] = span + } + } + spans = spans[:iSpan+1] + iSpan = 0 + } + + // Merge spans with zero-length to avoid special cases later. + for i, span := range spans { + if span.Length == 0 { + if i+1 < len(spans) { + spans[i+1].Offset += span.Offset + } + continue + } + if i != iSpan { + spans[iSpan] = span + } + iSpan++ + } + spans = spans[:iSpan] + iSpan = 0 + + // Cut out empty buckets from start and end of spans, no matter + // what. Also cut out empty buckets from the middle of a span but only + // if there are more than maxEmptyBuckets consecutive empty buckets. + for iBucket < len(buckets) { + if deltaBuckets { + currentBucketAbsolute += buckets[iBucket] + } else { + currentBucketAbsolute = buckets[iBucket] + } + if nEmpty := emptyBucketsHere(); nEmpty > 0 { + if posInSpan > 0 && + nEmpty < int(spans[iSpan].Length-posInSpan) && + nEmpty <= maxEmptyBuckets { + // The empty buckets are in the middle of a + // span, and there are few enough to not bother. + // Just fast-forward. + iBucket += nEmpty + if deltaBuckets { + currentBucketAbsolute = 0 + } + posInSpan += uint32(nEmpty) + continue + } + // In all other cases, we cut out the empty buckets. + if deltaBuckets && iBucket+nEmpty < len(buckets) { + currentBucketAbsolute = -buckets[iBucket] + buckets[iBucket+nEmpty] += buckets[iBucket] + } + buckets = append(buckets[:iBucket], buckets[iBucket+nEmpty:]...) + if posInSpan == 0 { + // Start of span. + if nEmpty == int(spans[iSpan].Length) { + // The whole span is empty. + offset := spans[iSpan].Offset + spans = append(spans[:iSpan], spans[iSpan+1:]...) + if len(spans) > iSpan { + spans[iSpan].Offset += offset + int32(nEmpty) + } + continue + } + spans[iSpan].Length -= uint32(nEmpty) + spans[iSpan].Offset += int32(nEmpty) + continue + } + // It's in the middle or in the end of the span. + // Split the current span. + newSpan := Span{ + Offset: int32(nEmpty), + Length: spans[iSpan].Length - posInSpan - uint32(nEmpty), + } + spans[iSpan].Length = posInSpan + // In any case, we have to split to the next span. + iSpan++ + posInSpan = 0 + if newSpan.Length == 0 { + // The span is empty, so we were already at the end of a span. + // We don't have to insert the new span, just adjust the next + // span's offset, if there is one. + if iSpan < len(spans) { + spans[iSpan].Offset += int32(nEmpty) + } + continue + } + // Insert the new span. 
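+			// (Grow spans by one, shift the tail right by one slot, and place
+			// newSpan at position iSpan.)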
+ spans = append(spans, Span{}) + if iSpan+1 < len(spans) { + copy(spans[iSpan+1:], spans[iSpan:]) + } + spans[iSpan] = newSpan + continue + } + iBucket++ + posInSpan++ + if posInSpan >= spans[iSpan].Length { + posInSpan = 0 + iSpan++ + } + } + if maxEmptyBuckets == 0 || len(buckets) == 0 { + return buckets, spans + } + + // Finally, check if any offsets between spans are small enough to merge + // the spans. + iBucket = int(spans[0].Length) + if deltaBuckets { + currentBucketAbsolute = 0 + for _, bucket := range buckets[:iBucket] { + currentBucketAbsolute += bucket + } + } + iSpan = 1 + for iSpan < len(spans) { + if int(spans[iSpan].Offset) > maxEmptyBuckets { + l := int(spans[iSpan].Length) + if deltaBuckets { + for _, bucket := range buckets[iBucket : iBucket+l] { + currentBucketAbsolute += bucket + } + } + iBucket += l + iSpan++ + continue + } + // Merge span with previous one and insert empty buckets. + offset := int(spans[iSpan].Offset) + spans[iSpan-1].Length += uint32(offset) + spans[iSpan].Length + spans = append(spans[:iSpan], spans[iSpan+1:]...) + newBuckets := make([]IBC, len(buckets)+offset) + copy(newBuckets, buckets[:iBucket]) + copy(newBuckets[iBucket+offset:], buckets[iBucket:]) + if deltaBuckets { + newBuckets[iBucket] = -currentBucketAbsolute + newBuckets[iBucket+offset] += currentBucketAbsolute + } + iBucket += offset + buckets = newBuckets + currentBucketAbsolute = buckets[iBucket] + // Note that with many merges, it would be more efficient to + // first record all the chunks of empty buckets to insert and + // then do it in one go through all the buckets. + } + + return buckets, spans +} + +func checkHistogramSpans(spans []Span, numBuckets int) error { + var spanBuckets int + for n, span := range spans { + if n > 0 && span.Offset < 0 { + return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset) + } + spanBuckets += int(span.Length) + } + if spanBuckets != numBuckets { + return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch) + } + return nil +} + +func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IBC, count *BC, deltas bool) error { + if len(buckets) == 0 { + return nil + } + + var last IBC + for i := 0; i < len(buckets); i++ { + var c IBC + if deltas { + c = last + buckets[i] + } else { + c = buckets[i] + } + if c < 0 { + return fmt.Errorf("bucket number %d has observation count of %v: %w", i+1, c, ErrHistogramNegativeBucketCount) + } + last = c + *count += BC(c) + } + + return nil +} + +func checkHistogramCustomBounds(bounds []float64, spans []Span, numBuckets int) error { + prev := math.Inf(-1) + for _, curr := range bounds { + if curr <= prev { + return fmt.Errorf("previous bound is %f and current is %f: %w", prev, curr, ErrHistogramCustomBucketsInvalid) + } + prev = curr + } + if prev == math.Inf(1) { + return fmt.Errorf("last +Inf bound must not be explicitly defined: %w", ErrHistogramCustomBucketsInfinite) + } + + var spanBuckets int + var totalSpanLength int + for n, span := range spans { + if span.Offset < 0 { + return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset) + } + spanBuckets += int(span.Length) + totalSpanLength += int(span.Length) + int(span.Offset) + } + if spanBuckets != numBuckets { + return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch) + } + if (len(bounds) + 1) < totalSpanLength { + 
return fmt.Errorf("only %d custom bounds defined which is insufficient to cover total span length of %d: %w", len(bounds), totalSpanLength, ErrHistogramCustomBucketsMismatch) + } + + return nil +} + +func getBound(idx, schema int32, customValues []float64) float64 { + if IsCustomBucketsSchema(schema) { + length := int32(len(customValues)) + switch { + case idx > length || idx < -1: + panic(fmt.Errorf("index %d out of bounds for custom bounds of length %d", idx, length)) + case idx == length: + return math.Inf(1) + case idx == -1: + return math.Inf(-1) + default: + return customValues[idx] + } + } + return getBoundExponential(idx, schema) +} + +func getBoundExponential(idx, schema int32) float64 { + // Here a bit of context about the behavior for the last bucket counting + // regular numbers (called simply "last bucket" below) and the bucket + // counting observations of ±Inf (called "inf bucket" below, with an idx + // one higher than that of the "last bucket"): + // + // If we apply the usual formula to the last bucket, its upper bound + // would be calculated as +Inf. The reason is that the max possible + // regular float64 number (math.MaxFloat64) doesn't coincide with one of + // the calculated bucket boundaries. So the calculated boundary has to + // be larger than math.MaxFloat64, and the only float64 larger than + // math.MaxFloat64 is +Inf. However, we want to count actual + // observations of ±Inf in the inf bucket. Therefore, we have to treat + // the upper bound of the last bucket specially and set it to + // math.MaxFloat64. (The upper bound of the inf bucket, with its idx + // being one higher than that of the last bucket, naturally comes out as + // +Inf by the usual formula. So that's fine.) + // + // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of + // 1024. If there were a float64 number following math.MaxFloat64, it + // would have a frac of 1.0 and an exp of 1024, or equivalently a frac + // of 0.5 and an exp of 1025. However, since frac must be smaller than + // 1, and exp must be smaller than 1025, either representation overflows + // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the + // largest possible float64. Q.E.D.) However, the formula for + // calculating the upper bound from the idx and schema of the last + // bucket results in precisely that. It is either frac=1.0 & exp=1024 + // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, + // by the way, a power of two where the exponent itself is a power of + // two, 2¹⁰ in fact, which coincides with a bucket boundary in all + // schemas.) So these are the special cases we have to catch below. + if schema < 0 { + exp := int(idx) << -schema + if exp == 1024 { + // This is the last bucket before the overflow bucket + // (for ±Inf observations). Return math.MaxFloat64 as + // explained above. + return math.MaxFloat64 + } + return math.Ldexp(1, exp) + } + + fracIdx := idx & ((1 << schema) - 1) + frac := exponentialBounds[schema][fracIdx] + exp := (int(idx) >> schema) + 1 + if frac == 0.5 && exp == 1025 { + // This is the last bucket before the overflow bucket (for ±Inf + // observations). Return math.MaxFloat64 as explained above. + return math.MaxFloat64 + } + return math.Ldexp(frac, exp) +} + +// exponentialBounds is a precalculated table of bucket bounds in the interval +// [0.5,1) in schema 0 to 8. 
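+// Each row for schema k holds the 2^k bounds 2^(-1+j/2^k) for j = 0..2^k-1;
+// e.g. the schema-2 row is {2^-1, 2^-3/4, 2^-1/2, 2^-1/4}.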
+var exponentialBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 
0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 
0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, +} + +// reduceResolution reduces the input spans, buckets in origin schema to the spans, buckets in 
target schema. +// The target schema must be smaller than the original schema. +// Set deltaBuckets to true if the provided buckets are +// deltas. Set it to false if the buckets contain absolute counts. +// Set inplace to true to reuse input slices and avoid allocations (otherwise +// new slices will be allocated for result). +func reduceResolution[IBC InternalBucketCount]( + originSpans []Span, + originBuckets []IBC, + originSchema, + targetSchema int32, + deltaBuckets bool, + inplace bool, +) ([]Span, []IBC) { + var ( + targetSpans []Span // The spans in the target schema. + targetBuckets []IBC // The bucket counts in the target schema. + bucketIdx int32 // The index of bucket in the origin schema. + bucketCountIdx int // The position of a bucket in origin bucket count slice `originBuckets`. + targetBucketIdx int32 // The index of bucket in the target schema. + lastBucketCount IBC // The last visited bucket's count in the origin schema. + lastTargetBucketIdx int32 // The index of the last added target bucket. + lastTargetBucketCount IBC + ) + + if inplace { + // Slice reuse is safe because when reducing the resolution, + // target slices don't grow faster than origin slices are being read. + targetSpans = originSpans[:0] + targetBuckets = originBuckets[:0] + } + + for _, span := range originSpans { + // Determine the index of the first bucket in this span. + bucketIdx += span.Offset + for j := 0; j < int(span.Length); j++ { + // Determine the index of the bucket in the target schema from the index in the original schema. + targetBucketIdx = targetIdx(bucketIdx, originSchema, targetSchema) + + switch { + case len(targetSpans) == 0: + // This is the first span in the targetSpans. + span := Span{ + Offset: targetBucketIdx, + Length: 1, + } + targetSpans = append(targetSpans, span) + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + lastTargetBucketIdx = targetBucketIdx + lastBucketCount = originBuckets[bucketCountIdx] + lastTargetBucketCount = originBuckets[bucketCountIdx] + + case lastTargetBucketIdx == targetBucketIdx: + // The current bucket has to be merged into the same target bucket as the previous bucket. + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets[len(targetBuckets)-1] += lastBucketCount + lastTargetBucketCount += lastBucketCount + } else { + targetBuckets[len(targetBuckets)-1] += originBuckets[bucketCountIdx] + } + + case (lastTargetBucketIdx + 1) == targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is next to the previous target bucket, + // so we add it to the current target span. + targetSpans[len(targetSpans)-1].Length++ + lastTargetBucketIdx++ + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount) + lastTargetBucketCount = lastBucketCount + } else { + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + } + + case (lastTargetBucketIdx + 1) < targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is separated by a gap from the previous target bucket, + // so we need to add a new target span. 
+ span := Span{ + Offset: targetBucketIdx - lastTargetBucketIdx - 1, + Length: 1, + } + targetSpans = append(targetSpans, span) + lastTargetBucketIdx = targetBucketIdx + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount) + lastTargetBucketCount = lastBucketCount + } else { + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + } + } + + bucketIdx++ + bucketCountIdx++ + } + } + + return targetSpans, targetBuckets +} + +func clearIfNotNil[T any](items []T) []T { + if items == nil { + return nil + } + return items[:0] +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go new file mode 100644 index 00000000..e4b99ec4 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go @@ -0,0 +1,632 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +import ( + "fmt" + "math" + "slices" + "strings" +) + +// CounterResetHint contains the known information about a counter reset, +// or alternatively that we are dealing with a gauge histogram, where counter resets do not apply. +type CounterResetHint byte + +const ( + UnknownCounterReset CounterResetHint = iota // UnknownCounterReset means we cannot say if this histogram signals a counter reset or not. + CounterReset // CounterReset means there was definitely a counter reset starting from this histogram. + NotCounterReset // NotCounterReset means there was definitely no counter reset with this histogram. + GaugeType // GaugeType means this is a gauge histogram, where counter resets do not happen. +) + +// Histogram encodes a sparse, high-resolution histogram. See the design +// document for full details: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit# +// +// The most tricky bit is how bucket indices represent real bucket boundaries. +// An example for schema 0 (by which each bucket is twice as wide as the +// previous bucket): +// +// Bucket boundaries → [-2,-1) [-1,-0.5) [-0.5,-0.25) ... [-0.001,0.001] ... (0.25,0.5] (0.5,1] (1,2] .... +// ↑ ↑ ↑ ↑ ↑ ↑ ↑ +// Zero bucket (width e.g. 0.001) → | | | ZB | | | +// Positive bucket indices → | | | ... -1 0 1 2 3 +// Negative bucket indices → 3 2 1 0 -1 ... +// +// Which bucket indices are actually used is determined by the spans. +type Histogram struct { + // Counter reset information. + CounterResetHint CounterResetHint + // Currently valid schema numbers are -4 <= n <= 8 for exponential buckets, + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in + // each case, and then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times + // 2^(2^-n). Another valid schema number is -53 for custom buckets, defined by + // the CustomValues field. 
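+	// For example, schema 0 uses the boundaries ..., 0.5, 1, 2, 4, ... (each
+	// bucket is twice as wide as the previous one), while schema 2 places
+	// 2^2 = 4 boundaries within each power of two, at successive factors of
+	// 2^(1/4).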
+ Schema int32 + // Width of the zero bucket. + ZeroThreshold float64 + // Observations falling into the zero bucket. + ZeroCount uint64 + // Total number of observations. + Count uint64 + // Sum of observations. This is also used as the stale marker. + Sum float64 + // Spans for positive and negative buckets (see Span below). + PositiveSpans, NegativeSpans []Span + // Observation counts in buckets. The first element is an absolute + // count. All following ones are deltas relative to the previous + // element. + PositiveBuckets, NegativeBuckets []int64 + // Holds the custom (usually upper) bounds for bucket definitions, otherwise nil. + // This slice is interned, to be treated as immutable and copied by reference. + // These numbers should be strictly increasing. This field is only used when the + // schema is for custom buckets, and the ZeroThreshold, ZeroCount, NegativeSpans + // and NegativeBuckets fields are not used in that case. + CustomValues []float64 +} + +// A Span defines a continuous sequence of buckets. +type Span struct { + // Gap to previous span (always positive), or starting index for the 1st + // span (which can be negative). + Offset int32 + // Length of the span. + Length uint32 +} + +func (h *Histogram) UsesCustomBuckets() bool { + return IsCustomBucketsSchema(h.Schema) +} + +// Copy returns a deep copy of the Histogram. +func (h *Histogram) Copy() *Histogram { + c := Histogram{ + CounterResetHint: h.CounterResetHint, + Schema: h.Schema, + Count: h.Count, + Sum: h.Sum, + } + + if h.UsesCustomBuckets() { + if len(h.CustomValues) != 0 { + c.CustomValues = make([]float64, len(h.CustomValues)) + copy(c.CustomValues, h.CustomValues) + } + } else { + c.ZeroThreshold = h.ZeroThreshold + c.ZeroCount = h.ZeroCount + + if len(h.NegativeSpans) != 0 { + c.NegativeSpans = make([]Span, len(h.NegativeSpans)) + copy(c.NegativeSpans, h.NegativeSpans) + } + if len(h.NegativeBuckets) != 0 { + c.NegativeBuckets = make([]int64, len(h.NegativeBuckets)) + copy(c.NegativeBuckets, h.NegativeBuckets) + } + } + + if len(h.PositiveSpans) != 0 { + c.PositiveSpans = make([]Span, len(h.PositiveSpans)) + copy(c.PositiveSpans, h.PositiveSpans) + } + if len(h.PositiveBuckets) != 0 { + c.PositiveBuckets = make([]int64, len(h.PositiveBuckets)) + copy(c.PositiveBuckets, h.PositiveBuckets) + } + + return &c +} + +// CopyTo makes a deep copy into the given Histogram object. +// The destination object has to be a non-nil pointer. 
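+// Unlike Copy, it reuses the destination's existing slices wherever their
+// capacity suffices, so repeated calls can avoid allocations.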
+func (h *Histogram) CopyTo(to *Histogram) { + to.CounterResetHint = h.CounterResetHint + to.Schema = h.Schema + to.Count = h.Count + to.Sum = h.Sum + + if h.UsesCustomBuckets() { + to.ZeroThreshold = 0 + to.ZeroCount = 0 + + to.NegativeSpans = clearIfNotNil(to.NegativeSpans) + to.NegativeBuckets = clearIfNotNil(to.NegativeBuckets) + + to.CustomValues = resize(to.CustomValues, len(h.CustomValues)) + copy(to.CustomValues, h.CustomValues) + } else { + to.ZeroThreshold = h.ZeroThreshold + to.ZeroCount = h.ZeroCount + + to.NegativeSpans = resize(to.NegativeSpans, len(h.NegativeSpans)) + copy(to.NegativeSpans, h.NegativeSpans) + + to.NegativeBuckets = resize(to.NegativeBuckets, len(h.NegativeBuckets)) + copy(to.NegativeBuckets, h.NegativeBuckets) + + to.CustomValues = clearIfNotNil(to.CustomValues) + } + + to.PositiveSpans = resize(to.PositiveSpans, len(h.PositiveSpans)) + copy(to.PositiveSpans, h.PositiveSpans) + + to.PositiveBuckets = resize(to.PositiveBuckets, len(h.PositiveBuckets)) + copy(to.PositiveBuckets, h.PositiveBuckets) +} + +// String returns a string representation of the Histogram. +func (h *Histogram) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "{count:%d, sum:%g", h.Count, h.Sum) + + var nBuckets []Bucket[uint64] + for it := h.NegativeBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + nBuckets = append(nBuckets, it.At()) + } + } + for i := len(nBuckets) - 1; i >= 0; i-- { + fmt.Fprintf(&sb, ", %s", nBuckets[i].String()) + } + + if h.ZeroCount != 0 { + fmt.Fprintf(&sb, ", %s", h.ZeroBucket().String()) + } + + for it := h.PositiveBucketIterator(); it.Next(); { + bucket := it.At() + if bucket.Count != 0 { + fmt.Fprintf(&sb, ", %s", bucket.String()) + } + } + + sb.WriteRune('}') + return sb.String() +} + +// ZeroBucket returns the zero bucket. This method panics if the schema is for custom buckets. +func (h *Histogram) ZeroBucket() Bucket[uint64] { + if h.UsesCustomBuckets() { + panic("histograms with custom buckets have no zero bucket") + } + return Bucket[uint64]{ + Lower: -h.ZeroThreshold, + Upper: h.ZeroThreshold, + LowerInclusive: true, + UpperInclusive: true, + Count: h.ZeroCount, + } +} + +// PositiveBucketIterator returns a BucketIterator to iterate over all positive +// buckets in ascending order (starting next to the zero bucket and going up). +func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { + it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true, h.CustomValues) + return &it +} + +// NegativeBucketIterator returns a BucketIterator to iterate over all negative +// buckets in descending order (starting next to the zero bucket and going down). +func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] { + it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false, nil) + return &it +} + +// CumulativeBucketIterator returns a BucketIterator to iterate over a +// cumulative view of the buckets. This method currently only supports +// Histograms without negative buckets and panics if the Histogram has negative +// buckets. It is currently only used for testing. +func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] { + if len(h.NegativeBuckets) > 0 { + panic("CumulativeBucketIterator called on Histogram with negative buckets") + } + return &cumulativeBucketIterator{h: h, posSpansIdx: -1} +} + +// Equals returns true if the given histogram matches exactly. 
+// Exact match is when there are no new buckets (even empty) and no missing buckets, +// and all the bucket values match. Spans can have different empty length spans in between, +// but they must represent the same bucket layout to match. +// Sum is compared based on its bit pattern because this method +// is about data equality rather than mathematical equality. +// We ignore fields that are not used based on the exponential / custom buckets schema, +// but check fields where differences may cause unintended behaviour even if they are not +// supposed to be used according to the schema. +func (h *Histogram) Equals(h2 *Histogram) bool { + if h2 == nil { + return false + } + + if h.Schema != h2.Schema || h.Count != h2.Count || + math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { + return false + } + + if h.UsesCustomBuckets() { + if !FloatBucketsMatch(h.CustomValues, h2.CustomValues) { + return false + } + } + + if h.ZeroThreshold != h2.ZeroThreshold || h.ZeroCount != h2.ZeroCount { + return false + } + + if !spansMatch(h.NegativeSpans, h2.NegativeSpans) { + return false + } + if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) { + return false + } + + if !spansMatch(h.PositiveSpans, h2.PositiveSpans) { + return false + } + if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) { + return false + } + + return true +} + +// spansMatch returns true if both spans represent the same bucket layout +// after combining zero length spans with the next non-zero length span. +func spansMatch(s1, s2 []Span) bool { + if len(s1) == 0 && len(s2) == 0 { + return true + } + + s1idx, s2idx := 0, 0 + for { + if s1idx >= len(s1) { + return allEmptySpans(s2[s2idx:]) + } + if s2idx >= len(s2) { + return allEmptySpans(s1[s1idx:]) + } + + currS1, currS2 := s1[s1idx], s2[s2idx] + s1idx++ + s2idx++ + if currS1.Length == 0 { + // This span is zero length, so we add consecutive such spans + // until we find a non-zero span. + for ; s1idx < len(s1) && s1[s1idx].Length == 0; s1idx++ { + currS1.Offset += s1[s1idx].Offset + } + if s1idx < len(s1) { + currS1.Offset += s1[s1idx].Offset + currS1.Length = s1[s1idx].Length + s1idx++ + } + } + if currS2.Length == 0 { + // This span is zero length, so we add consecutive such spans + // until we find a non-zero span. + for ; s2idx < len(s2) && s2[s2idx].Length == 0; s2idx++ { + currS2.Offset += s2[s2idx].Offset + } + if s2idx < len(s2) { + currS2.Offset += s2[s2idx].Offset + currS2.Length = s2[s2idx].Length + s2idx++ + } + } + + if currS1.Length == 0 && currS2.Length == 0 { + // The last spans of both set are zero length. Previous spans match. + return true + } + + if currS1.Offset != currS2.Offset || currS1.Length != currS2.Length { + return false + } + } +} + +func allEmptySpans(s []Span) bool { + for _, ss := range s { + if ss.Length > 0 { + return false + } + } + return true +} + +// Compact works like FloatHistogram.Compact. See there for detailed +// explanations. +func (h *Histogram) Compact(maxEmptyBuckets int) *Histogram { + h.PositiveBuckets, h.PositiveSpans = compactBuckets( + h.PositiveBuckets, h.PositiveSpans, maxEmptyBuckets, true, + ) + h.NegativeBuckets, h.NegativeSpans = compactBuckets( + h.NegativeBuckets, h.NegativeSpans, maxEmptyBuckets, true, + ) + return h +} + +// ToFloat returns a FloatHistogram representation of the Histogram. It is a deep +// copy (e.g. spans are not shared). The function accepts a FloatHistogram as an +// argument whose memory will be reused and overwritten if provided. 
If this +// argument is nil, a new FloatHistogram will be allocated. +func (h *Histogram) ToFloat(fh *FloatHistogram) *FloatHistogram { + if fh == nil { + fh = &FloatHistogram{} + } + fh.CounterResetHint = h.CounterResetHint + fh.Schema = h.Schema + fh.Count = float64(h.Count) + fh.Sum = h.Sum + + if h.UsesCustomBuckets() { + fh.ZeroThreshold = 0 + fh.ZeroCount = 0 + fh.NegativeSpans = clearIfNotNil(fh.NegativeSpans) + fh.NegativeBuckets = clearIfNotNil(fh.NegativeBuckets) + + fh.CustomValues = resize(fh.CustomValues, len(h.CustomValues)) + copy(fh.CustomValues, h.CustomValues) + } else { + fh.ZeroThreshold = h.ZeroThreshold + fh.ZeroCount = float64(h.ZeroCount) + + fh.NegativeSpans = resize(fh.NegativeSpans, len(h.NegativeSpans)) + copy(fh.NegativeSpans, h.NegativeSpans) + + fh.NegativeBuckets = resize(fh.NegativeBuckets, len(h.NegativeBuckets)) + var currentNegative float64 + for i, b := range h.NegativeBuckets { + currentNegative += float64(b) + fh.NegativeBuckets[i] = currentNegative + } + fh.CustomValues = clearIfNotNil(fh.CustomValues) + } + + fh.PositiveSpans = resize(fh.PositiveSpans, len(h.PositiveSpans)) + copy(fh.PositiveSpans, h.PositiveSpans) + + fh.PositiveBuckets = resize(fh.PositiveBuckets, len(h.PositiveBuckets)) + var currentPositive float64 + for i, b := range h.PositiveBuckets { + currentPositive += float64(b) + fh.PositiveBuckets[i] = currentPositive + } + + return fh +} + +func resize[T any](items []T, n int) []T { + if cap(items) < n { + return make([]T, n) + } + return items[:n] +} + +// Validate validates consistency between span and bucket slices. Also, buckets are checked +// against negative values. We check to make sure there are no unexpected fields or field values +// based on the exponential / custom buckets schema. +// For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a +// strict h.Count = nCount + pCount + h.ZeroCount check is performed. +// Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount), +// because NaN observations do not increment the values of buckets (but they do increment +// the total h.Count). 
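+// For example, a histogram whose buckets sum to 4 with a ZeroCount of 1 must
+// have Count == 5 exactly, unless Sum is NaN, in which case any Count >= 5
+// passes validation.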
+func (h *Histogram) Validate() error { + var nCount, pCount uint64 + if h.UsesCustomBuckets() { + if err := checkHistogramCustomBounds(h.CustomValues, h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("custom buckets: %w", err) + } + if h.ZeroCount != 0 { + return fmt.Errorf("custom buckets: must have zero count of 0") + } + if h.ZeroThreshold != 0 { + return fmt.Errorf("custom buckets: must have zero threshold of 0") + } + if len(h.NegativeSpans) > 0 { + return fmt.Errorf("custom buckets: must not have negative spans") + } + if len(h.NegativeBuckets) > 0 { + return fmt.Errorf("custom buckets: must not have negative buckets") + } + } else { + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + if h.CustomValues != nil { + return fmt.Errorf("histogram with exponential schema must not have custom bounds") + } + } + err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true) + if err != nil { + return fmt.Errorf("positive side: %w", err) + } + + sumOfBuckets := nCount + pCount + h.ZeroCount + if math.IsNaN(h.Sum) { + if sumOfBuckets > h.Count { + return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountNotBigEnough) + } + } else { + if sumOfBuckets != h.Count { + return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountMismatch) + } + } + + return nil +} + +type regularBucketIterator struct { + baseBucketIterator[uint64, int64] +} + +func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool, customValues []float64) regularBucketIterator { + i := baseBucketIterator[uint64, int64]{ + schema: schema, + spans: spans, + buckets: buckets, + positive: positive, + customValues: customValues, + } + return regularBucketIterator{i} +} + +func (r *regularBucketIterator) Next() bool { + if r.spansIdx >= len(r.spans) { + return false + } + span := r.spans[r.spansIdx] + // Seed currIdx for the first bucket. + if r.bucketsIdx == 0 { + r.currIdx = span.Offset + } else { + r.currIdx++ + } + for r.idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We'll even handle pathologic spans of length 0. + r.idxInSpan = 0 + r.spansIdx++ + if r.spansIdx >= len(r.spans) { + return false + } + span = r.spans[r.spansIdx] + r.currIdx += span.Offset + } + + r.currCount += r.buckets[r.bucketsIdx] + r.idxInSpan++ + r.bucketsIdx++ + return true +} + +type cumulativeBucketIterator struct { + h *Histogram + + posSpansIdx int // Index in h.PositiveSpans we are in. -1 means 0 bucket. + posBucketsIdx int // Index in h.PositiveBuckets. + idxInSpan uint32 // Index in the current span. 0 <= idxInSpan < span.Length. + + initialized bool + currIdx int32 // The actual bucket index after decoding from spans. + currUpper float64 // The upper boundary of the current bucket. + currCount int64 // Current non-cumulative count for the current bucket. Does not apply for empty bucket. + currCumulativeCount uint64 // Current "cumulative" count for the current bucket. 
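+	// ("Cumulative" means the total count of observations in all buckets up to
+	// and including currUpper; At reports it with Lower set to -Inf.)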
+
+	// Between 2 spans there could be some empty buckets which
+	// still need to be counted for cumulative buckets.
+	// When we hit the end of a span, we use this to iterate
+	// through the empty buckets.
+	emptyBucketCount int32
+}
+
+func (c *cumulativeBucketIterator) Next() bool {
+	if c.posSpansIdx == -1 {
+		// Zero bucket.
+		c.posSpansIdx++
+		if c.h.ZeroCount == 0 {
+			return c.Next()
+		}
+
+		c.currUpper = c.h.ZeroThreshold
+		c.currCount = int64(c.h.ZeroCount)
+		c.currCumulativeCount = uint64(c.currCount)
+		return true
+	}
+
+	if c.posSpansIdx >= len(c.h.PositiveSpans) {
+		return false
+	}
+
+	if c.emptyBucketCount > 0 {
+		// We are traversing through empty buckets at the moment.
+		c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
+		c.currIdx++
+		c.emptyBucketCount--
+		return true
+	}
+
+	span := c.h.PositiveSpans[c.posSpansIdx]
+	if c.posSpansIdx == 0 && !c.initialized {
+		// Initializing.
+		c.currIdx = span.Offset
+		// The first bucket is an absolute count, not a delta relative to the zero bucket.
+		c.currCount = 0
+		c.initialized = true
+	}
+
+	c.currCount += c.h.PositiveBuckets[c.posBucketsIdx]
+	c.currCumulativeCount += uint64(c.currCount)
+	c.currUpper = getBound(c.currIdx, c.h.Schema, c.h.CustomValues)
+
+	c.posBucketsIdx++
+	c.idxInSpan++
+	c.currIdx++
+	if c.idxInSpan >= span.Length {
+		// Move to the next span. This one is done.
+		c.posSpansIdx++
+		c.idxInSpan = 0
+		if c.posSpansIdx < len(c.h.PositiveSpans) {
+			c.emptyBucketCount = c.h.PositiveSpans[c.posSpansIdx].Offset
+		}
+	}
+
+	return true
+}
+
+func (c *cumulativeBucketIterator) At() Bucket[uint64] {
+	return Bucket[uint64]{
+		Upper:          c.currUpper,
+		Lower:          math.Inf(-1),
+		UpperInclusive: true,
+		LowerInclusive: true,
+		Count:          c.currCumulativeCount,
+		Index:          c.currIdx - 1,
+	}
+}
+
+// ReduceResolution reduces the histogram's spans and buckets to the target schema.
+// The target schema must be smaller than the current histogram's schema.
+// This will panic if the histogram has custom buckets or if the target schema is
+// a custom buckets schema.
+func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram {
+	if h.UsesCustomBuckets() {
+		panic("cannot reduce resolution when there are custom buckets")
+	}
+	if IsCustomBucketsSchema(targetSchema) {
+		panic("cannot reduce resolution to custom buckets schema")
+	}
+	if targetSchema >= h.Schema {
+		panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema))
+	}
+
+	h.PositiveSpans, h.PositiveBuckets = reduceResolution(
+		h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, true, true,
+	)
+	h.NegativeSpans, h.NegativeBuckets = reduceResolution(
+		h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, true, true,
+	)
+	h.Schema = targetSchema
+	return h
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go
new file mode 100644
index 00000000..9e9a711c
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go
@@ -0,0 +1,52 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package histogram
+
+// GenerateBigTestHistograms generates a slice of histograms with a given number of buckets each.
+func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram {
+	numSpans := numBuckets / 10
+	bucketsPerSide := numBuckets / 2
+	spanLength := uint32(bucketsPerSide / numSpans)
+	// Given all bucket deltas are 1, sum bucketsPerSide + 1.
+	observationCount := bucketsPerSide * (1 + bucketsPerSide)
+
+	var histograms []*Histogram
+	for i := 0; i < numHistograms; i++ {
+		h := &Histogram{
+			Count:           uint64(i + observationCount),
+			ZeroCount:       uint64(i),
+			ZeroThreshold:   1e-128,
+			Sum:             18.4 * float64(i+1),
+			Schema:          2,
+			NegativeSpans:   make([]Span, numSpans),
+			PositiveSpans:   make([]Span, numSpans),
+			NegativeBuckets: make([]int64, bucketsPerSide),
+			PositiveBuckets: make([]int64, bucketsPerSide),
+		}
+
+		for j := 0; j < numSpans; j++ {
+			s := Span{Offset: 1, Length: spanLength}
+			h.NegativeSpans[j] = s
+			h.PositiveSpans[j] = s
+		}
+
+		for j := 0; j < bucketsPerSide; j++ {
+			h.NegativeBuckets[j] = 1
+			h.PositiveBuckets[j] = 1
+		}
+
+		histograms = append(histograms, h)
+	}
+	return histograms
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go
new file mode 100644
index 00000000..f4de7496
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go
@@ -0,0 +1,490 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !stringlabels && !dedupelabels
+
+package labels
+
+import (
+	"bytes"
+	"slices"
+	"strings"
+
+	"github.com/cespare/xxhash/v2"
+)
+
+// Labels is a sorted set of labels. Order has to be guaranteed upon
+// instantiation.
+type Labels []Label
+
+func (ls Labels) Len() int           { return len(ls) }
+func (ls Labels) Swap(i, j int)      { ls[i], ls[j] = ls[j], ls[i] }
+func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
+
+// Bytes returns ls as a byte slice.
+// It uses an invalid byte character as a separator and so should not be used for printing.
+func (ls Labels) Bytes(buf []byte) []byte {
+	b := bytes.NewBuffer(buf[:0])
+	b.WriteByte(labelSep)
+	for i, l := range ls {
+		if i > 0 {
+			b.WriteByte(sep)
+		}
+		b.WriteString(l.Name)
+		b.WriteByte(sep)
+		b.WriteString(l.Value)
+	}
+	return b.Bytes()
+}
+
+// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean.
+// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. +func (ls Labels) MatchLabels(on bool, names ...string) Labels { + matchedLabels := Labels{} + + nameSet := make(map[string]struct{}, len(names)) + for _, n := range names { + nameSet[n] = struct{}{} + } + + for _, v := range ls { + if _, ok := nameSet[v.Name]; on == ok && (on || v.Name != MetricName) { + matchedLabels = append(matchedLabels, v) + } + } + + return matchedLabels +} + +// Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. +func (ls Labels) Hash() uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for i, v := range ls { + if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. + h := xxhash.New() + _, _ = h.Write(b) + for _, v := range ls[i:] { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, v.Name...) + b = append(b, sep) + b = append(b, v.Value...) + b = append(b, sep) + } + return xxhash.Sum64(b) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + i, j := 0, 0 + for i < len(ls) && j < len(names) { + switch { + case names[j] < ls[i].Name: + j++ + case ls[i].Name < names[j]: + i++ + default: + b = append(b, ls[i].Name...) + b = append(b, sep) + b = append(b, ls[i].Value...) + b = append(b, sep) + i++ + j++ + } + } + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := range ls { + for j < len(names) && names[j] < ls[i].Name { + j++ + } + if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) { + continue + } + b = append(b, ls[i].Name...) + b = append(b, sep) + b = append(b, ls[i].Value...) + b = append(b, sep) + } + return xxhash.Sum64(b), b +} + +// BytesWithLabels is just as Bytes(), but only for labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { + b := bytes.NewBuffer(buf[:0]) + b.WriteByte(labelSep) + i, j := 0, 0 + for i < len(ls) && j < len(names) { + switch { + case names[j] < ls[i].Name: + j++ + case ls[i].Name < names[j]: + i++ + default: + if b.Len() > 1 { + b.WriteByte(sep) + } + b.WriteString(ls[i].Name) + b.WriteByte(sep) + b.WriteString(ls[i].Value) + i++ + j++ + } + } + return b.Bytes() +} + +// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { + b := bytes.NewBuffer(buf[:0]) + b.WriteByte(labelSep) + j := 0 + for i := range ls { + for j < len(names) && names[j] < ls[i].Name { + j++ + } + if j < len(names) && ls[i].Name == names[j] { + continue + } + if b.Len() > 1 { + b.WriteByte(sep) + } + b.WriteString(ls[i].Name) + b.WriteByte(sep) + b.WriteString(ls[i].Value) + } + return b.Bytes() +} + +// Copy returns a copy of the labels. 
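
Copy's implementation follows. First, a quick usage sketch of the hashing helpers above; note the subset variants require the name lists to be pre-sorted, as their doc comments state:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	ls := labels.FromStrings("__name__", "up", "instance", "host:9090", "job", "api")

	fmt.Println(ls.Hash()) // hash of the full set

	// The subset variants reuse the caller's buffer and need the
	// name lists sorted in ascending order.
	h1, buf := ls.HashForLabels(nil, "instance", "job")
	h2, _ := ls.HashWithoutLabels(buf, "instance", "job") // also skips __name__
	fmt.Println(h1, h2)
}
```
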
+func (ls Labels) Copy() Labels {
+ res := make(Labels, len(ls))
+ copy(res, ls)
+ return res
+}
+
+// Get returns the value for the label with the given name.
+// Returns an empty string if the label doesn't exist.
+func (ls Labels) Get(name string) string {
+ for _, l := range ls {
+ if l.Name == name {
+ return l.Value
+ }
+ }
+ return ""
+}
+
+// Has returns true if the label with the given name is present.
+func (ls Labels) Has(name string) bool {
+ for _, l := range ls {
+ if l.Name == name {
+ return true
+ }
+ }
+ return false
+}
+
+// HasDuplicateLabelNames returns whether ls has duplicate label names.
+// It assumes that the labelset is sorted.
+func (ls Labels) HasDuplicateLabelNames() (string, bool) {
+ for i, l := range ls {
+ if i == 0 {
+ continue
+ }
+ if l.Name == ls[i-1].Name {
+ return l.Name, true
+ }
+ }
+ return "", false
+}
+
+// WithoutEmpty returns the labelset without empty labels.
+// May return the same labelset.
+func (ls Labels) WithoutEmpty() Labels {
+ for _, v := range ls {
+ if v.Value != "" {
+ continue
+ }
+ // Do not copy the slice until it's necessary.
+ els := make(Labels, 0, len(ls)-1)
+ for _, v := range ls {
+ if v.Value != "" {
+ els = append(els, v)
+ }
+ }
+ return els
+ }
+ return ls
+}
+
+// Equal returns whether the two label sets are equal.
+func Equal(ls, o Labels) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for i, l := range ls {
+ if l != o[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EmptyLabels returns an empty Labels value, for convenience.
+func EmptyLabels() Labels {
+ return Labels{}
+}
+
+// New returns a sorted Labels from the given labels.
+// The caller has to guarantee that all label names are unique.
+func New(ls ...Label) Labels {
+ set := make(Labels, 0, len(ls))
+ set = append(set, ls...)
+ slices.SortFunc(set, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+
+ return set
+}
+
+// FromStrings creates new labels from pairs of strings.
+func FromStrings(ss ...string) Labels {
+ if len(ss)%2 != 0 {
+ panic("invalid number of strings")
+ }
+ res := make(Labels, 0, len(ss)/2)
+ for i := 0; i < len(ss); i += 2 {
+ res = append(res, Label{Name: ss[i], Value: ss[i+1]})
+ }
+
+ slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+ return res
+}
+
+// Compare compares the two label sets.
+// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
+func Compare(a, b Labels) int {
+ l := len(a)
+ if len(b) < l {
+ l = len(b)
+ }
+
+ for i := 0; i < l; i++ {
+ if a[i].Name != b[i].Name {
+ if a[i].Name < b[i].Name {
+ return -1
+ }
+ return 1
+ }
+ if a[i].Value != b[i].Value {
+ if a[i].Value < b[i].Value {
+ return -1
+ }
+ return 1
+ }
+ }
+ // If all labels so far were in common, the set with fewer labels comes first.
+ return len(a) - len(b)
+}
+
+// CopyFrom copies labels from b on top of whatever was in ls previously,
+// reusing memory or expanding if needed.
+func (ls *Labels) CopyFrom(b Labels) {
+ (*ls) = append((*ls)[:0], b...)
+}
+
+// IsEmpty returns true if ls represents an empty set of labels.
+func (ls Labels) IsEmpty() bool {
+ return len(ls) == 0
+}
+
+// Range calls f on each label.
+func (ls Labels) Range(f func(l Label)) {
+ for _, l := range ls {
+ f(l)
+ }
+}
+
+// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration.
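
Validate's body follows. For orientation, a minimal sketch of constructing and ordering label sets with the functions defined above:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// FromStrings sorts by label name, regardless of argument order.
	a := labels.FromStrings("job", "api", "instance", "host:9090")
	fmt.Println(a) // {instance="host:9090", job="api"}

	// Compare orders first by name, then by value; with a shared prefix,
	// the set with fewer labels sorts first.
	b := labels.FromStrings("instance", "host:9090")
	fmt.Println(labels.Compare(a, b) > 0) // true: a has an extra label
}
```
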
+func (ls Labels) Validate(f func(l Label) error) error { + for _, l := range ls { + if err := f(l); err != nil { + return err + } + } + return nil +} + +// DropMetricName returns Labels with "__name__" removed. +func (ls Labels) DropMetricName() Labels { + for i, l := range ls { + if l.Name == MetricName { + if i == 0 { // Make common case fast with no allocations. + return ls[1:] + } + // Avoid modifying original Labels - use [:i:i] so that left slice would not + // have any spare capacity and append would have to allocate a new slice for the result. + return append(ls[:i:i], ls[i+1:]...) + } + } + return ls +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + for i, l := range *ls { + (*ls)[i].Name = intern(l.Name) + (*ls)[i].Value = intern(l.Value) + } +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + for _, l := range ls { + release(l.Name) + release(l.Value) + } +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []Label +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + b.base.Range(func(l Label) { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + }) +} + +// Labels returns the labels from the builder. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels() Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + expectedSize := len(b.base) + len(b.add) - len(b.del) + if expectedSize < 1 { + expectedSize = 1 + } + res := make(Labels, 0, expectedSize) + for _, l := range b.base { + if slices.Contains(b.del, l.Name) || contains(b.add, l.Name) { + continue + } + res = append(res, l) + } + if len(b.add) > 0 { // Base is already in order, so we only need to sort if we add to it. + res = append(res, b.add...) + slices.SortFunc(res, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + } + return res +} + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + add Labels +} + +// SymbolTable is no-op, just for api parity with dedupelabels. +type SymbolTable struct{} + +func NewSymbolTable() *SymbolTable { return nil } + +func (t *SymbolTable) Len() int { return 0 } + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{add: make([]Label, 0, n)} +} + +// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels. +func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { + return NewBuilder(EmptyLabels()) +} + +// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for api parity with dedupelabels. +func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { + return NewScratchBuilder(n) +} + +func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { + // no-op +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. +func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// UnsafeAddBytes adds a name/value pair, using []byte instead of string. 
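
The rest of the UnsafeAddBytes doc comment continues below. Since ScratchBuilder is the intended tool for building many label sets in a loop, here is a usage sketch against the methods above:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	b := labels.NewScratchBuilder(2) // pre-size for the expected number of labels
	for _, name := range []string{"series_a", "series_b"} {
		b.Reset()
		b.Add("job", "api")
		b.Add("__name__", name) // added out of order on purpose
		b.Sort()                // required before Labels() when adds were unsorted
		fmt.Println(b.Labels())
	}
}
```
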
+// The '-tags stringlabels' version of this function is unsafe, hence the name. +// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: string(name), Value: string(value)}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) +} + +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. +func (b *ScratchBuilder) Assign(ls Labels) { + b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. +} + +// Labels returns the name/value pairs added so far as a Labels object. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. + return append([]Label{}, b.add...) +} + +// Overwrite the newly-built Labels out to ls. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + *ls = append((*ls)[:0], b.add...) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go new file mode 100644 index 00000000..99529a38 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go @@ -0,0 +1,234 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "bytes" + "encoding/json" + "slices" + "strconv" + "unsafe" + + "github.com/prometheus/common/model" +) + +const ( + MetricName = "__name__" + AlertName = "alertname" + BucketLabel = "le" + InstanceName = "instance" + + labelSep = '\xfe' // Used at beginning of `Bytes` return. + sep = '\xff' // Used between labels in `Bytes` and `Hash`. +) + +var seps = []byte{sep} // Used with Hash, which has no WriteByte method. + +// Label is a key/value pair of strings. +type Label struct { + Name, Value string +} + +func (ls Labels) String() string { + var bytea [1024]byte // On stack to avoid memory allocation while building the output. + b := bytes.NewBuffer(bytea[:0]) + + b.WriteByte('{') + i := 0 + ls.Range(func(l Label) { + if i > 0 { + b.WriteByte(',') + b.WriteByte(' ') + } + b.WriteString(l.Name) + b.WriteByte('=') + b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value)) + i++ + }) + b.WriteByte('}') + return b.String() +} + +// MarshalJSON implements json.Marshaler. +func (ls Labels) MarshalJSON() ([]byte, error) { + return json.Marshal(ls.Map()) +} + +// UnmarshalJSON implements json.Unmarshaler. 
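
UnmarshalJSON's body follows. Both JSON hooks go through Map() and FromMap(), so the wire format is a plain string-to-string object; a round-trip sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	ls := labels.FromStrings("__name__", "up", "job", "api")

	// MarshalJSON serializes via Map(), producing a flat JSON object.
	out, err := json.Marshal(ls)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"__name__":"up","job":"api"}

	// UnmarshalJSON round-trips through FromMap, restoring sorted Labels.
	var back labels.Labels
	if err := json.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(labels.Equal(ls, back)) // true
}
```
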
+func (ls *Labels) UnmarshalJSON(b []byte) error { + var m map[string]string + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MarshalYAML implements yaml.Marshaler. +func (ls Labels) MarshalYAML() (interface{}, error) { + return ls.Map(), nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. +func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]string + + if err := unmarshal(&m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool { + err := ls.Validate(func(l Label) error { + if l.Name == model.MetricNameLabel { + // If the default validation scheme has been overridden with legacy mode, + // we need to call the special legacy validation checker. + if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) { + return strconv.ErrSyntax + } + if !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } + } + if validationScheme == model.LegacyValidation && model.NameValidationScheme == model.UTF8Validation { + if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + } else if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + return nil + }) + return err == nil +} + +// Map returns a string map of the labels. +func (ls Labels) Map() map[string]string { + m := make(map[string]string) + ls.Range(func(l Label) { + m[l.Name] = l.Value + }) + return m +} + +// FromMap returns new sorted Labels from the given map. +func FromMap(m map[string]string) Labels { + l := make([]Label, 0, len(m)) + for k, v := range m { + l = append(l, Label{Name: k, Value: v}) + } + return New(l...) +} + +// NewBuilder returns a new LabelsBuilder. +func NewBuilder(base Labels) *Builder { + b := &Builder{ + del: make([]string, 0, 5), + add: make([]Label, 0, 5), + } + b.Reset(base) + return b +} + +// Del deletes the label of the given name. +func (b *Builder) Del(ns ...string) *Builder { + for _, n := range ns { + for i, a := range b.add { + if a.Name == n { + b.add = append(b.add[:i], b.add[i+1:]...) + } + } + b.del = append(b.del, n) + } + return b +} + +// Keep removes all labels from the base except those with the given names. +func (b *Builder) Keep(ns ...string) *Builder { + b.base.Range(func(l Label) { + for _, n := range ns { + if l.Name == n { + return + } + } + b.del = append(b.del, l.Name) + }) + return b +} + +// Set the name/value pair as a label. A value of "" means delete that label. +func (b *Builder) Set(n, v string) *Builder { + if v == "" { + // Empty labels are the same as missing labels. + return b.Del(n) + } + for i, a := range b.add { + if a.Name == n { + b.add[i].Value = v + return b + } + } + b.add = append(b.add, Label{Name: n, Value: v}) + + return b +} + +func (b *Builder) Get(n string) string { + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. + for _, a := range b.add { + if a.Name == n { + return a.Value + } + } + if slices.Contains(b.del, n) { + return "" + } + return b.base.Get(n) +} + +// Range calls f on each label in the Builder. +func (b *Builder) Range(f func(l Label)) { + // Stack-based arrays to avoid heap allocation in most cases. 
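+ // (128 entries is far more than typical label sets, so the copies taken
+ // below almost never need a heap allocation.)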
+ var addStack [128]Label + var delStack [128]string + // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). + origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) + b.base.Range(func(l Label) { + if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { + f(l) + } + }) + for _, a := range origAdd { + f(a) + } +} + +func contains(s []Label, n string) bool { + for _, a := range s { + if a.Name == n { + return true + } + } + return false +} + +func yoloString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go new file mode 100644 index 00000000..da8a88cc --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go @@ -0,0 +1,817 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build dedupelabels + +package labels + +import ( + "bytes" + "slices" + "strings" + "sync" + + "github.com/cespare/xxhash/v2" +) + +// Labels is implemented by a SymbolTable and string holding name/value +// pairs encoded as indexes into the table in varint encoding. +// Names are in alphabetical order. +type Labels struct { + syms *nameTable + data string +} + +// Split SymbolTable into the part used by Labels and the part used by Builder. Only the latter needs the map. + +// This part is used by Labels. All fields are immutable after construction. +type nameTable struct { + byNum []string // This slice header is never changed, even while we are building the symbol table. + symbolTable *SymbolTable // If we need to use it in a Builder. +} + +// SymbolTable is used to map strings into numbers so they can be packed together. +type SymbolTable struct { + mx sync.Mutex + *nameTable + nextNum int + byName map[string]int +} + +const defaultSymbolTableSize = 1024 + +func NewSymbolTable() *SymbolTable { + t := &SymbolTable{ + nameTable: &nameTable{byNum: make([]string, defaultSymbolTableSize)}, + byName: make(map[string]int, defaultSymbolTableSize), + } + t.nameTable.symbolTable = t + return t +} + +func (t *SymbolTable) Len() int { + t.mx.Lock() + defer t.mx.Unlock() + return len(t.byName) +} + +// ToNum maps a string to an integer, adding the string to the table if it is not already there. +// Note: copies the string before adding, in case the caller passed part of +// a buffer that should not be kept alive by this SymbolTable. +func (t *SymbolTable) ToNum(name string) int { + t.mx.Lock() + defer t.mx.Unlock() + return t.toNumUnlocked(name) +} + +func (t *SymbolTable) toNumUnlocked(name string) int { + if i, found := t.byName[name]; found { + return i + } + i := t.nextNum + if t.nextNum == cap(t.byNum) { + // Name table is full; copy to a new one. Don't touch the existing slice, as nameTable is immutable after construction. 
+ newSlice := make([]string, cap(t.byNum)*2)
+ copy(newSlice, t.byNum)
+ t.nameTable = &nameTable{byNum: newSlice, symbolTable: t}
+ }
+ name = strings.Clone(name)
+ t.byNum[i] = name
+ t.byName[name] = i
+ t.nextNum++
+ return i
+}
+
+func (t *SymbolTable) checkNum(name string) (int, bool) {
+ t.mx.Lock()
+ defer t.mx.Unlock()
+ i, ok := t.byName[name]
+ return i, ok
+}
+
+// ToName maps an integer to a string.
+func (t *nameTable) ToName(num int) string {
+ return t.byNum[num]
+}
+
+// "Varint" in this file is non-standard: we encode small numbers (up to 32767) in 2 bytes,
+// because we expect most Prometheus deployments to have more than 127 unique strings.
+// And we don't encode numbers larger than 4 bytes because we don't expect more than 536,870,912 unique strings.
+func decodeVarint(data string, index int) (int, int) {
+ b := int(data[index]) + int(data[index+1])<<8
+ index += 2
+ if b < 0x8000 {
+ return b, index
+ }
+ return decodeVarintRest(b, data, index)
+}
+
+func decodeVarintRest(b int, data string, index int) (int, int) {
+ value := int(b & 0x7FFF)
+ b = int(data[index])
+ index++
+ if b < 0x80 {
+ return value | (b << 15), index
+ }
+
+ value |= (b & 0x7f) << 15
+ b = int(data[index])
+ index++
+ return value | (b << 22), index
+}
+
+func decodeString(t *nameTable, data string, index int) (string, int) {
+ // Copy decodeVarint here, because the Go compiler says it's too big to inline.
+ num := int(data[index]) + int(data[index+1])<<8
+ index += 2
+ if num >= 0x8000 {
+ num, index = decodeVarintRest(num, data, index)
+ }
+ return t.ToName(num), index
+}
+
+// Bytes returns ls as a byte slice.
+// It uses non-printing characters and so should not be used for printing.
+func (ls Labels) Bytes(buf []byte) []byte {
+ b := bytes.NewBuffer(buf[:0])
+ for i := 0; i < len(ls.data); {
+ if i > 0 {
+ b.WriteByte(sep)
+ }
+ var name, value string
+ name, i = decodeString(ls.syms, ls.data, i)
+ value, i = decodeString(ls.syms, ls.data, i)
+ b.WriteString(name)
+ b.WriteByte(sep)
+ b.WriteString(value)
+ }
+ return b.Bytes()
+}
+
+// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted.
+func (ls Labels) IsZero() bool {
+ return len(ls.data) == 0
+}
+
+// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean.
+// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false.
+// TODO: This is only used in printing an error message
+func (ls Labels) MatchLabels(on bool, names ...string) Labels {
+ b := NewBuilder(ls)
+ if on {
+ b.Keep(names...)
+ } else {
+ b.Del(MetricName)
+ b.Del(names...)
+ }
+ return b.Labels()
+}
+
+// Hash returns a hash value for the label set.
+// Note: the result is not guaranteed to be consistent across different runs of Prometheus.
+func (ls Labels) Hash() uint64 {
+ // Use xxhash.Sum64(b) for fast path as it's faster.
+ b := make([]byte, 0, 1024)
+ for pos := 0; pos < len(ls.data); {
+ name, newPos := decodeString(ls.syms, ls.data, pos)
+ value, newPos := decodeString(ls.syms, ls.data, newPos)
+ if len(b)+len(name)+len(value)+2 >= cap(b) {
+ // If labels entry is 1KB+, hash the rest of them via Write().
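+ // (The streaming hasher replays everything accumulated in b so far and
+ // then consumes the remaining pairs directly, so this path produces the
+ // same digest as the one-shot Sum64 fast path below.)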
+ h := xxhash.New() + _, _ = h.Write(b) + for pos < len(ls.data) { + name, pos = decodeString(ls.syms, ls.data, pos) + value, pos = decodeString(ls.syms, ls.data, pos) + _, _ = h.WriteString(name) + _, _ = h.Write(seps) + _, _ = h.WriteString(value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, name...) + b = append(b, sep) + b = append(b, value...) + b = append(b, sep) + pos = newPos + } + return xxhash.Sum64(b) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.syms, ls.data, i) + value, i = decodeString(ls.syms, ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if j == len(names) { + break + } + if name == names[j] { + b = append(b, name...) + b = append(b, sep) + b = append(b, value...) + b = append(b, sep) + } + } + + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.syms, ls.data, i) + value, i = decodeString(ls.syms, ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if name == MetricName || (j < len(names) && name == names[j]) { + continue + } + b = append(b, name...) + b = append(b, sep) + b = append(b, value...) + b = append(b, sep) + } + return xxhash.Sum64(b), b +} + +// BytesWithLabels is just as Bytes(), but only for labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { + b := bytes.NewBuffer(buf[:0]) + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.syms, ls.data, pos) + lValue, newPos := decodeString(ls.syms, ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) { + break + } + if lName == names[j] { + if b.Len() > 1 { + b.WriteByte(sep) + } + b.WriteString(lName) + b.WriteByte(sep) + b.WriteString(lValue) + } + pos = newPos + } + return b.Bytes() +} + +// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { + b := bytes.NewBuffer(buf[:0]) + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.syms, ls.data, pos) + lValue, newPos := decodeString(ls.syms, ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) || lName != names[j] { + if b.Len() > 1 { + b.WriteByte(sep) + } + b.WriteString(lName) + b.WriteByte(sep) + b.WriteString(lValue) + } + pos = newPos + } + return b.Bytes() +} + +// Copy returns a copy of the labels. +func (ls Labels) Copy() Labels { + return Labels{syms: ls.syms, data: strings.Clone(ls.data)} +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + if name == "" { // Avoid crash in loop if someone asks for "". + return "" // Prometheus does not store blank label names. 
+ } + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.syms, ls.data, i) + if lName == name { + lValue, _ = decodeString(ls.syms, ls.data, i) + return lValue + } else if lName[0] > name[0] { // Stop looking if we've gone past. + break + } + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(ls.data[i]) + int(ls.data[i+1])<<8 + i += 2 + if num >= 0x8000 { + _, i = decodeVarintRest(num, ls.data, i) + } + } + return "" +} + +// Has returns true if the label with the given name is present. +func (ls Labels) Has(name string) bool { + if name == "" { // Avoid crash in loop if someone asks for "". + return false // Prometheus does not store blank label names. + } + for i := 0; i < len(ls.data); { + var lName string + lName, i = decodeString(ls.syms, ls.data, i) + if lName == name { + return true + } else if lName[0] > name[0] { // Stop looking if we've gone past. + break + } + // Copy decodeVarint here, because the Go compiler says it's too big to inline. + num := int(ls.data[i]) + int(ls.data[i+1])<<8 + i += 2 + if num >= 0x8000 { + _, i = decodeVarintRest(num, ls.data, i) + } + } + return false +} + +// HasDuplicateLabelNames returns whether ls has duplicate label names. +// It assumes that the labelset is sorted. +func (ls Labels) HasDuplicateLabelNames() (string, bool) { + prevNum := -1 + for i := 0; i < len(ls.data); { + var lNum int + lNum, i = decodeVarint(ls.data, i) + _, i = decodeVarint(ls.data, i) + if lNum == prevNum { + return ls.syms.ToName(lNum), true + } + prevNum = lNum + } + return "", false +} + +// WithoutEmpty returns the labelset without empty labels. +// May return the same labelset. +func (ls Labels) WithoutEmpty() Labels { + if ls.IsEmpty() { + return ls + } + // Idea: have a constant symbol for blank, then we don't have to look it up. + blank, ok := ls.syms.symbolTable.checkNum("") + if !ok { // Symbol table has no entry for blank - none of the values can be blank. + return ls + } + for pos := 0; pos < len(ls.data); { + _, newPos := decodeVarint(ls.data, pos) + lValue, newPos := decodeVarint(ls.data, newPos) + if lValue != blank { + pos = newPos + continue + } + // Do not copy the slice until it's necessary. + // TODO: could optimise the case where all blanks are at the end. + // Note: we size the new buffer on the assumption there is exactly one blank value. + buf := make([]byte, pos, pos+(len(ls.data)-newPos)) + copy(buf, ls.data[:pos]) // copy the initial non-blank labels + pos = newPos // move past the first blank value + for pos < len(ls.data) { + var newPos int + _, newPos = decodeVarint(ls.data, pos) + lValue, newPos = decodeVarint(ls.data, newPos) + if lValue != blank { + buf = append(buf, ls.data[pos:newPos]...) + } + pos = newPos + } + return Labels{syms: ls.syms, data: yoloString(buf)} + } + return ls +} + +// Equal returns whether the two label sets are equal. +func Equal(a, b Labels) bool { + if a.syms == b.syms { + return a.data == b.data + } + + la, lb := len(a.data), len(b.data) + ia, ib := 0, 0 + for ia < la && ib < lb { + var aValue, bValue string + aValue, ia = decodeString(a.syms, a.data, ia) + bValue, ib = decodeString(b.syms, b.data, ib) + if aValue != bValue { + return false + } + } + if ia != la || ib != lb { + return false + } + return true +} + +// EmptyLabels returns an empty Labels value, for convenience. +func EmptyLabels() Labels { + return Labels{} +} + +// New returns a sorted Labels from the given labels. 
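
New's doc comment continues below. The practical way to benefit from this build is to share one SymbolTable across many series, so repeated names and values are stored once. A sketch; it compiles against the API in this file when built with -tags dedupelabels, and against the no-op shims in the other builds:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	st := labels.NewSymbolTable()
	b := labels.NewScratchBuilderWithSymbolTable(st, 2)

	var series []labels.Labels
	for _, inst := range []string{"a:9090", "b:9090"} {
		b.Reset()
		b.Add("instance", inst)
		b.Add("job", "api") // "job"/"api" resolve to the same symbols each time
		b.Sort()
		series = append(series, b.Labels())
	}
	fmt.Println(series, st.Len())
}
```
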
+// The caller has to guarantee that all label names are unique. +// Note this function is not efficient; should not be used in performance-critical places. +func New(ls ...Label) Labels { + slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + syms := NewSymbolTable() + var stackSpace [16]int + size, nums := mapLabelsToNumbers(syms, ls, stackSpace[:]) + buf := make([]byte, size) + marshalNumbersToSizedBuffer(nums, buf) + return Labels{syms: syms.nameTable, data: yoloString(buf)} +} + +// FromStrings creates new labels from pairs of strings. +func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + ls := make([]Label, 0, len(ss)/2) + for i := 0; i < len(ss); i += 2 { + ls = append(ls, Label{Name: ss[i], Value: ss[i+1]}) + } + + return New(ls...) +} + +// Compare compares the two label sets. +// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +func Compare(a, b Labels) int { + la, lb := len(a.data), len(b.data) + ia, ib := 0, 0 + for ia < la && ib < lb { + var aName, bName string + aName, ia = decodeString(a.syms, a.data, ia) + bName, ib = decodeString(b.syms, b.data, ib) + if aName != bName { + if aName < bName { + return -1 + } + return 1 + } + var aValue, bValue string + aValue, ia = decodeString(a.syms, a.data, ia) + bValue, ib = decodeString(b.syms, b.data, ib) + if aValue != bValue { + if aValue < bValue { + return -1 + } + return 1 + } + } + // If all labels so far were in common, the set with fewer labels comes first. + return (la - ia) - (lb - ib) +} + +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + *ls = b // Straightforward memberwise copy is all we need. +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls.data) == 0 +} + +// Len returns the number of labels; it is relatively slow. +func (ls Labels) Len() int { + count := 0 + for i := 0; i < len(ls.data); { + _, i = decodeVarint(ls.data, i) + _, i = decodeVarint(ls.data, i) + count++ + } + return count +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.syms, ls.data, i) + lValue, i = decodeString(ls.syms, ls.data, i) + f(Label{Name: lName, Value: lValue}) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.syms, ls.data, i) + lValue, i = decodeString(ls.syms, ls.data, i) + err := f(Label{Name: lName, Value: lValue}) + if err != nil { + return err + } + } + return nil +} + +// InternStrings calls intern on every string value inside ls, replacing them with what it returns. +func (ls *Labels) InternStrings(intern func(string) string) { + // TODO: remove these calls as there is nothing to do. +} + +// ReleaseStrings calls release on every string value inside ls. +func (ls Labels) ReleaseStrings(release func(string)) { + // TODO: remove these calls as there is nothing to do. +} + +// DropMetricName returns Labels with "__name__" removed. 
+func (ls Labels) DropMetricName() Labels { + for i := 0; i < len(ls.data); { + lName, i2 := decodeString(ls.syms, ls.data, i) + _, i2 = decodeVarint(ls.data, i2) + if lName == MetricName { + if i == 0 { // Make common case fast with no allocations. + ls.data = ls.data[i2:] + } else { + ls.data = ls.data[:i] + ls.data[i2:] + } + break + } else if lName[0] > MetricName[0] { // Stop looking if we've gone past. + break + } + i = i2 + } + return ls +} + +// Builder allows modifying Labels. +type Builder struct { + syms *SymbolTable + nums []int + base Labels + del []string + add []Label +} + +// NewBuilderWithSymbolTable returns a new LabelsBuilder not based on any labels, but with the SymbolTable. +func NewBuilderWithSymbolTable(s *SymbolTable) *Builder { + return &Builder{ + syms: s, + } +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + if base.syms != nil { // If base has a symbol table, use that. + b.syms = base.syms.symbolTable + } else if b.syms == nil { // Or continue using previous symbol table in builder. + b.syms = NewSymbolTable() // Don't do this in performance-sensitive code. + } + + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + base.Range(func(l Label) { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + }) +} + +// Labels returns the labels from the builder. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels() Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + slices.Sort(b.del) + a, d, newSize := 0, 0, 0 + + newSize, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums) + bufSize := len(b.base.data) + newSize + buf := make([]byte, 0, bufSize) + for pos := 0; pos < len(b.base.data); { + oldPos := pos + var lName string + lName, pos = decodeString(b.base.syms, b.base.data, pos) + _, pos = decodeVarint(b.base.data, pos) + for d < len(b.del) && b.del[d] < lName { + d++ + } + if d < len(b.del) && b.del[d] == lName { + continue // This label has been deleted. + } + for ; a < len(b.add) && b.add[a].Name < lName; a++ { + buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) // Insert label that was not in the base set. + } + if a < len(b.add) && b.add[a].Name == lName { + buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) + a++ + continue // This label has been replaced. + } + buf = append(buf, b.base.data[oldPos:pos]...) // If base had a symbol-table we are using it, so we don't need to look up these symbols. + } + // We have come to the end of the base set; add any remaining labels. 
+ for ; a < len(b.add); a++ { + buf = appendLabelTo(b.nums[a*2], b.nums[a*2+1], buf) + } + return Labels{syms: b.syms.nameTable, data: yoloString(buf)} +} + +func marshalNumbersToSizedBuffer(nums []int, data []byte) int { + i := len(data) + for index := len(nums) - 1; index >= 0; index-- { + i = encodeVarint(data, i, nums[index]) + } + return len(data) - i +} + +func sizeVarint(x uint64) (n int) { + // Most common case first + if x < 1<<15 { + return 2 + } + if x < 1<<22 { + return 3 + } + if x >= 1<<29 { + panic("Number too large to represent") + } + return 4 +} + +func encodeVarintSlow(data []byte, offset int, v uint64) int { + offset -= sizeVarint(v) + base := offset + data[offset] = uint8(v) + v >>= 8 + offset++ + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return base +} + +// Special code for the common case that a value is less than 32768 +func encodeVarint(data []byte, offset, v int) int { + if v < 1<<15 { + offset -= 2 + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + return offset + } + return encodeVarintSlow(data, offset, uint64(v)) +} + +// Map all the strings in lbls to the symbol table; return the total size required to hold them and all the individual mappings. +func mapLabelsToNumbers(t *SymbolTable, lbls []Label, buf []int) (totalSize int, nums []int) { + nums = buf[:0] + t.mx.Lock() + defer t.mx.Unlock() + // we just encode name/value/name/value, without any extra tags or length bytes + for _, m := range lbls { + // strings are encoded as a single varint, the index into the symbol table. + i := t.toNumUnlocked(m.Name) + nums = append(nums, i) + totalSize += sizeVarint(uint64(i)) + i = t.toNumUnlocked(m.Value) + nums = append(nums, i) + totalSize += sizeVarint(uint64(i)) + } + return totalSize, nums +} + +func appendLabelTo(nameNum, valueNum int, buf []byte) []byte { + size := sizeVarint(uint64(nameNum)) + sizeVarint(uint64(valueNum)) + sizeRequired := len(buf) + size + if cap(buf) >= sizeRequired { + buf = buf[:sizeRequired] + } else { + bufSize := cap(buf) + // Double size of buffer each time it needs to grow, to amortise copying cost. + for bufSize < sizeRequired { + bufSize = bufSize*2 + 1 + } + newBuf := make([]byte, sizeRequired, bufSize) + copy(newBuf, buf) + buf = newBuf + } + i := sizeRequired + i = encodeVarint(buf, i, valueNum) + i = encodeVarint(buf, i, nameNum) + return buf +} + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + syms *SymbolTable + nums []int + add []Label + output Labels + overwriteBuffer []byte +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +// Warning: expensive; don't call in tight loops. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{syms: NewSymbolTable(), add: make([]Label, 0, n)} +} + +// NewScratchBuilderWithSymbolTable creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilderWithSymbolTable(s *SymbolTable, n int) ScratchBuilder { + return ScratchBuilder{syms: s, add: make([]Label, 0, n)} +} + +func (b *ScratchBuilder) SetSymbolTable(s *SymbolTable) { + b.syms = s +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] + b.output = EmptyLabels() +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. 
+func (b *ScratchBuilder) Add(name, value string) {
+ b.add = append(b.add, Label{Name: name, Value: value})
+}
+
+// UnsafeAddBytes adds a name/value pair, using []byte instead of string to reduce memory allocations.
+// The values must remain live until Labels() is called.
+func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) {
+ b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)})
+}
+
+// Sort the labels added so far by name.
+func (b *ScratchBuilder) Sort() {
+ slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) })
+}
+
+// Assign is for when you already have a Labels which you want this ScratchBuilder to return.
+func (b *ScratchBuilder) Assign(l Labels) {
+ b.output = l
+}
+
+// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect.
+// Note: if you want them sorted, call Sort() first.
+func (b *ScratchBuilder) Labels() Labels {
+ if b.output.IsEmpty() {
+ var size int
+ size, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums)
+ buf := make([]byte, size)
+ marshalNumbersToSizedBuffer(b.nums, buf)
+ b.output = Labels{syms: b.syms.nameTable, data: yoloString(buf)}
+ }
+ return b.output
+}
+
+// Write the newly-built Labels out to ls, reusing an internal buffer.
+// Callers must ensure that there are no other references to ls, or any strings fetched from it.
+func (b *ScratchBuilder) Overwrite(ls *Labels) {
+ var size int
+ size, b.nums = mapLabelsToNumbers(b.syms, b.add, b.nums)
+ if size <= cap(b.overwriteBuffer) {
+ b.overwriteBuffer = b.overwriteBuffer[:size]
+ } else {
+ b.overwriteBuffer = make([]byte, size)
+ }
+ marshalNumbersToSizedBuffer(b.nums, b.overwriteBuffer)
+ ls.syms = b.syms.nameTable
+ ls.data = yoloString(b.overwriteBuffer)
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go
new file mode 100644
index 00000000..c64bb990
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go
@@ -0,0 +1,693 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build stringlabels
+
+package labels
+
+import (
+ "slices"
+ "strings"
+ "unsafe"
+
+ "github.com/cespare/xxhash/v2"
+)
+
+// Labels is implemented by a single flat string holding name/value pairs.
+// Each name and value is preceded by its length in varint encoding.
+// Names are in order.
+type Labels struct {
+ data string
+}
+
+func decodeSize(data string, index int) (int, int) {
+ // Fast-path for common case of a single byte, value 0..127.
+ b := data[index]
+ index++
+ if b < 0x80 {
+ return int(b), index
+ }
+ size := int(b & 0x7F)
+ for shift := uint(7); ; shift += 7 {
+ // Just panic if we go off the end of data, since all Labels strings are constructed internally and
+ // malformed data indicates a bug, or memory corruption.
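+ // (Standard little-endian base-128 decoding: each byte contributes its
+ // low 7 bits, and a high bit of 0 marks the final byte of the size.)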
+ b := data[index] + index++ + size |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + return size, index +} + +func decodeString(data string, index int) (string, int) { + var size int + size, index = decodeSize(data, index) + return data[index : index+size], index + size +} + +// Bytes returns ls as a byte slice. +// It uses non-printing characters and so should not be used for printing. +func (ls Labels) Bytes(buf []byte) []byte { + if cap(buf) < len(ls.data) { + buf = make([]byte, len(ls.data)) + } else { + buf = buf[:len(ls.data)] + } + copy(buf, ls.data) + return buf +} + +// IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted. +func (ls Labels) IsZero() bool { + return len(ls.data) == 0 +} + +// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. +// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. +// TODO: This is only used in printing an error message +func (ls Labels) MatchLabels(on bool, names ...string) Labels { + b := NewBuilder(ls) + if on { + b.Keep(names...) + } else { + b.Del(MetricName) + b.Del(names...) + } + return b.Labels() +} + +// Hash returns a hash value for the label set. +// Note: the result is not guaranteed to be consistent across different runs of Prometheus. +func (ls Labels) Hash() uint64 { + return xxhash.Sum64(yoloBytes(ls.data)) +} + +// HashForLabels returns a hash value for the labels matching the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if j == len(names) { + break + } + if name == names[j] { + b = append(b, name...) + b = append(b, sep) + b = append(b, value...) + b = append(b, sep) + } + } + + return xxhash.Sum64(b), b +} + +// HashWithoutLabels returns a hash value for all labels except those matching +// the provided names. +// 'names' have to be sorted in ascending order. +func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { + b = b[:0] + j := 0 + for i := 0; i < len(ls.data); { + var name, value string + name, i = decodeString(ls.data, i) + value, i = decodeString(ls.data, i) + for j < len(names) && names[j] < name { + j++ + } + if name == MetricName || (j < len(names) && name == names[j]) { + continue + } + b = append(b, name...) + b = append(b, sep) + b = append(b, value...) + b = append(b, sep) + } + return xxhash.Sum64(b), b +} + +// BytesWithLabels is just as Bytes(), but only for labels matching names. +// 'names' have to be sorted in ascending order. +func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { + b := buf[:0] + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.data, pos) + _, newPos = decodeString(ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) { + break + } + if lName == names[j] { + b = append(b, ls.data[pos:newPos]...) + } + pos = newPos + } + return b +} + +// BytesWithoutLabels is just as Bytes(), but only for labels not matching names. +// 'names' have to be sorted in ascending order. 
+func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { + b := buf[:0] + j := 0 + for pos := 0; pos < len(ls.data); { + lName, newPos := decodeString(ls.data, pos) + _, newPos = decodeString(ls.data, newPos) + for j < len(names) && names[j] < lName { + j++ + } + if j == len(names) || lName != names[j] { + b = append(b, ls.data[pos:newPos]...) + } + pos = newPos + } + return b +} + +// Copy returns a copy of the labels. +func (ls Labels) Copy() Labels { + return Labels{data: strings.Clone(ls.data)} +} + +// Get returns the value for the label with the given name. +// Returns an empty string if the label doesn't exist. +func (ls Labels) Get(name string) string { + if name == "" { // Avoid crash in loop if someone asks for "". + return "" // Prometheus does not store blank label names. + } + for i := 0; i < len(ls.data); { + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + lValue, _ := decodeString(ls.data, i) + return lValue + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size + } + size, i = decodeSize(ls.data, i) + i += size + } + return "" +} + +// Has returns true if the label with the given name is present. +func (ls Labels) Has(name string) bool { + if name == "" { // Avoid crash in loop if someone asks for "". + return false // Prometheus does not store blank label names. + } + for i := 0; i < len(ls.data); { + var size int + size, i = decodeSize(ls.data, i) + if ls.data[i] == name[0] { + lName := ls.data[i : i+size] + i += size + if lName == name { + return true + } + } else { + if ls.data[i] > name[0] { // Stop looking if we've gone past. + break + } + i += size + } + size, i = decodeSize(ls.data, i) + i += size + } + return false +} + +// HasDuplicateLabelNames returns whether ls has duplicate label names. +// It assumes that the labelset is sorted. +func (ls Labels) HasDuplicateLabelNames() (string, bool) { + var lName, prevName string + for i := 0; i < len(ls.data); { + lName, i = decodeString(ls.data, i) + _, i = decodeString(ls.data, i) + if lName == prevName { + return lName, true + } + prevName = lName + } + return "", false +} + +// WithoutEmpty returns the labelset without empty labels. +// May return the same labelset. +func (ls Labels) WithoutEmpty() Labels { + for pos := 0; pos < len(ls.data); { + _, newPos := decodeString(ls.data, pos) + lValue, newPos := decodeString(ls.data, newPos) + if lValue != "" { + pos = newPos + continue + } + // Do not copy the slice until it's necessary. + // TODO: could optimise the case where all blanks are at the end. + // Note: we size the new buffer on the assumption there is exactly one blank value. + buf := make([]byte, pos, pos+(len(ls.data)-newPos)) + copy(buf, ls.data[:pos]) // copy the initial non-blank labels + pos = newPos // move past the first blank value + for pos < len(ls.data) { + var newPos int + _, newPos = decodeString(ls.data, pos) + lValue, newPos = decodeString(ls.data, newPos) + if lValue != "" { + buf = append(buf, ls.data[pos:newPos]...) + } + pos = newPos + } + return Labels{data: yoloString(buf)} + } + return ls +} + +// Equal returns whether the two label sets are equal. +func Equal(ls, o Labels) bool { + return ls.data == o.data +} + +// EmptyLabels returns an empty Labels value, for convenience. 
+func EmptyLabels() Labels { + return Labels{} +} +func yoloBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// New returns a sorted Labels from the given labels. +// The caller has to guarantee that all label names are unique. +func New(ls ...Label) Labels { + slices.SortFunc(ls, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + size := labelsSize(ls) + buf := make([]byte, size) + marshalLabelsToSizedBuffer(ls, buf) + return Labels{data: yoloString(buf)} +} + +// FromStrings creates new labels from pairs of strings. +func FromStrings(ss ...string) Labels { + if len(ss)%2 != 0 { + panic("invalid number of strings") + } + ls := make([]Label, 0, len(ss)/2) + for i := 0; i < len(ss); i += 2 { + ls = append(ls, Label{Name: ss[i], Value: ss[i+1]}) + } + + return New(ls...) +} + +// Compare compares the two label sets. +// The result will be 0 if a==b, <0 if a < b, and >0 if a > b. +func Compare(a, b Labels) int { + // Find the first byte in the string where a and b differ. + shorter, longer := a.data, b.data + if len(b.data) < len(a.data) { + shorter, longer = b.data, a.data + } + i := 0 + // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. + sp := unsafe.Pointer(unsafe.StringData(shorter)) + lp := unsafe.Pointer(unsafe.StringData(longer)) + for ; i < len(shorter)-8; i += 8 { + if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { + break + } + } + // Now go 1 byte at a time. + for ; i < len(shorter); i++ { + if shorter[i] != longer[i] { + break + } + } + if i == len(shorter) { + // One Labels was a prefix of the other; the set with fewer labels compares lower. + return len(a.data) - len(b.data) + } + + // Now we know that there is some difference before the end of a and b. + // Go back through the fields and find which field that difference is in. + firstCharDifferent, i := i, 0 + size, nextI := decodeSize(a.data, i) + for nextI+size <= firstCharDifferent { + i = nextI + size + size, nextI = decodeSize(a.data, i) + } + // Difference is inside this entry. + aStr, _ := decodeString(a.data, i) + bStr, _ := decodeString(b.data, i) + if aStr < bStr { + return -1 + } + return +1 +} + +// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +func (ls *Labels) CopyFrom(b Labels) { + ls.data = b.data // strings are immutable +} + +// IsEmpty returns true if ls represents an empty set of labels. +func (ls Labels) IsEmpty() bool { + return len(ls.data) == 0 +} + +// Len returns the number of labels; it is relatively slow. +func (ls Labels) Len() int { + count := 0 + for i := 0; i < len(ls.data); { + var size int + size, i = decodeSize(ls.data, i) + i += size + size, i = decodeSize(ls.data, i) + i += size + count++ + } + return count +} + +// Range calls f on each label. +func (ls Labels) Range(f func(l Label)) { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + f(Label{Name: lName, Value: lValue}) + } +} + +// Validate calls f on each label. If f returns a non-nil error, then it returns that error cancelling the iteration. +func (ls Labels) Validate(f func(l Label) error) error { + for i := 0; i < len(ls.data); { + var lName, lValue string + lName, i = decodeString(ls.data, i) + lValue, i = decodeString(ls.data, i) + err := f(Label{Name: lName, Value: lValue}) + if err != nil { + return err + } + } + return nil +} + +// DropMetricName returns Labels with "__name__" removed. 
+func (ls Labels) DropMetricName() Labels { + for i := 0; i < len(ls.data); { + lName, i2 := decodeString(ls.data, i) + size, i2 := decodeSize(ls.data, i2) + i2 += size + if lName == MetricName { + if i == 0 { // Make common case fast with no allocations. + ls.data = ls.data[i2:] + } else { + ls.data = ls.data[:i] + ls.data[i2:] + } + break + } else if lName[0] > MetricName[0] { // Stop looking if we've gone past. + break + } + i = i2 + } + return ls +} + +// InternStrings is a no-op because it would only save when the whole set of labels is identical. +func (ls *Labels) InternStrings(intern func(string) string) { +} + +// ReleaseStrings is a no-op for the same reason as InternStrings. +func (ls Labels) ReleaseStrings(release func(string)) { +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []Label +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + b.base.Range(func(l Label) { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + }) +} + +// Labels returns the labels from the builder. +// If no modifications were made, the original labels are returned. +func (b *Builder) Labels() Labels { + if len(b.del) == 0 && len(b.add) == 0 { + return b.base + } + + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) + slices.Sort(b.del) + a, d := 0, 0 + + bufSize := len(b.base.data) + labelsSize(b.add) + buf := make([]byte, 0, bufSize) + for pos := 0; pos < len(b.base.data); { + oldPos := pos + var lName string + lName, pos = decodeString(b.base.data, pos) + _, pos = decodeString(b.base.data, pos) + for d < len(b.del) && b.del[d] < lName { + d++ + } + if d < len(b.del) && b.del[d] == lName { + continue // This label has been deleted. + } + for ; a < len(b.add) && b.add[a].Name < lName; a++ { + buf = appendLabelTo(buf, &b.add[a]) // Insert label that was not in the base set. + } + if a < len(b.add) && b.add[a].Name == lName { + buf = appendLabelTo(buf, &b.add[a]) + a++ + continue // This label has been replaced. + } + buf = append(buf, b.base.data[oldPos:pos]...) + } + // We have come to the end of the base set; add any remaining labels. 
+ for ; a < len(b.add); a++ { + buf = appendLabelTo(buf, &b.add[a]) + } + return Labels{data: yoloString(buf)} +} + +func marshalLabelsToSizedBuffer(lbls []Label, data []byte) int { + i := len(data) + for index := len(lbls) - 1; index >= 0; index-- { + size := marshalLabelToSizedBuffer(&lbls[index], data[:i]) + i -= size + } + return len(data) - i +} + +func marshalLabelToSizedBuffer(m *Label, data []byte) int { + i := len(data) + i -= len(m.Value) + copy(data[i:], m.Value) + i = encodeSize(data, i, len(m.Value)) + i -= len(m.Name) + copy(data[i:], m.Name) + i = encodeSize(data, i, len(m.Name)) + return len(data) - i +} + +func sizeVarint(x uint64) (n int) { + // Most common case first + if x < 1<<7 { + return 1 + } + if x >= 1<<56 { + return 9 + } + if x >= 1<<28 { + x >>= 28 + n = 4 + } + if x >= 1<<14 { + x >>= 14 + n += 2 + } + if x >= 1<<7 { + n++ + } + return n + 1 +} + +func encodeVarint(data []byte, offset int, v uint64) int { + offset -= sizeVarint(v) + base := offset + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return base +} + +// Special code for the common case that a size is less than 128 +func encodeSize(data []byte, offset, v int) int { + if v < 1<<7 { + offset-- + data[offset] = uint8(v) + return offset + } + return encodeVarint(data, offset, uint64(v)) +} + +func labelsSize(lbls []Label) (n int) { + // we just encode name/value/name/value, without any extra tags or length bytes + for _, e := range lbls { + n += labelSize(&e) + } + return n +} + +func labelSize(m *Label) (n int) { + // strings are encoded as length followed by contents. + l := len(m.Name) + n += l + sizeVarint(uint64(l)) + l = len(m.Value) + n += l + sizeVarint(uint64(l)) + return n +} + +func appendLabelTo(buf []byte, m *Label) []byte { + size := labelSize(m) + sizeRequired := len(buf) + size + if cap(buf) >= sizeRequired { + buf = buf[:sizeRequired] + } else { + bufSize := cap(buf) + // Double size of buffer each time it needs to grow, to amortise copying cost. + for bufSize < sizeRequired { + bufSize = bufSize*2 + 1 + } + newBuf := make([]byte, sizeRequired, bufSize) + copy(newBuf, buf) + buf = newBuf + } + marshalLabelToSizedBuffer(m, buf) + return buf +} + +// ScratchBuilder allows efficient construction of a Labels from scratch. +type ScratchBuilder struct { + add []Label + output Labels + overwriteBuffer []byte +} + +// NewScratchBuilder creates a ScratchBuilder initialized for Labels with n entries. +func NewScratchBuilder(n int) ScratchBuilder { + return ScratchBuilder{add: make([]Label, 0, n)} +} + +func (b *ScratchBuilder) Reset() { + b.add = b.add[:0] + b.output = EmptyLabels() +} + +// Add a name/value pair. +// Note if you Add the same name twice you will get a duplicate label, which is invalid. +func (b *ScratchBuilder) Add(name, value string) { + b.add = append(b.add, Label{Name: name, Value: value}) +} + +// Add a name/value pair, using []byte instead of string to reduce memory allocations. +// The values must remain live until Labels() is called. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) +} + +// Sort the labels added so far by name. +func (b *ScratchBuilder) Sort() { + slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) +} + +// Assign is for when you already have a Labels which you want this ScratchBuilder to return. 
+func (b *ScratchBuilder) Assign(l Labels) { + b.output = l +} + +// Labels returns the name/value pairs added as a Labels object. Calling Add() after Labels() has no effect. +// Note: if you want them sorted, call Sort() first. +func (b *ScratchBuilder) Labels() Labels { + if b.output.IsEmpty() { + size := labelsSize(b.add) + buf := make([]byte, size) + marshalLabelsToSizedBuffer(b.add, buf) + b.output = Labels{data: yoloString(buf)} + } + return b.output +} + +// Write the newly-built Labels out to ls, reusing an internal buffer. +// Callers must ensure that there are no other references to ls, or any strings fetched from it. +func (b *ScratchBuilder) Overwrite(ls *Labels) { + size := labelsSize(b.add) + if size <= cap(b.overwriteBuffer) { + b.overwriteBuffer = b.overwriteBuffer[:size] + } else { + b.overwriteBuffer = make([]byte, size) + } + marshalLabelsToSizedBuffer(b.add, b.overwriteBuffer) + ls.data = yoloString(b.overwriteBuffer) +} + +// Symbol-table is no-op, just for api parity with dedupelabels. +type SymbolTable struct{} + +func NewSymbolTable() *SymbolTable { return nil } + +func (t *SymbolTable) Len() int { return 0 } + +// NewBuilderWithSymbolTable creates a Builder, for api parity with dedupelabels. +func NewBuilderWithSymbolTable(_ *SymbolTable) *Builder { + return NewBuilder(EmptyLabels()) +} + +// NewScratchBuilderWithSymbolTable creates a ScratchBuilder, for api parity with dedupelabels. +func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { + return NewScratchBuilder(n) +} + +func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { + // no-op +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/matcher.go b/vendor/github.com/prometheus/prometheus/model/labels/matcher.go new file mode 100644 index 00000000..a09c838e --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/labels/matcher.go @@ -0,0 +1,170 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "bytes" + "strconv" +) + +// MatchType is an enum for label matching types. +type MatchType int + +// Possible MatchTypes. +const ( + MatchEqual MatchType = iota + MatchNotEqual + MatchRegexp + MatchNotRegexp +) + +var matchTypeToStr = [...]string{ + MatchEqual: "=", + MatchNotEqual: "!=", + MatchRegexp: "=~", + MatchNotRegexp: "!~", +} + +func (m MatchType) String() string { + if m < MatchEqual || m > MatchNotRegexp { + panic("unknown match type") + } + return matchTypeToStr[m] +} + +// Matcher models the matching of a label. +type Matcher struct { + Type MatchType + Name string + Value string + + re *FastRegexMatcher +} + +// NewMatcher returns a matcher object. 
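+// For example (illustrative): m, err := NewMatcher(MatchRegexp, "job", "api|web")
+// yields a matcher for which m.Matches("api") is true.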
+func NewMatcher(t MatchType, n, v string) (*Matcher, error) {
+	m := &Matcher{
+		Type:  t,
+		Name:  n,
+		Value: v,
+	}
+	if t == MatchRegexp || t == MatchNotRegexp {
+		re, err := NewFastRegexMatcher(v)
+		if err != nil {
+			return nil, err
+		}
+		m.re = re
+	}
+	return m, nil
+}
+
+// MustNewMatcher panics on error - only for use in tests!
+func MustNewMatcher(mt MatchType, name, val string) *Matcher {
+	m, err := NewMatcher(mt, name, val)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+func (m *Matcher) String() string {
+	// Start a buffer with a pre-allocated size on stack to cover most needs.
+	var bytea [1024]byte
+	b := bytes.NewBuffer(bytea[:0])
+
+	if m.shouldQuoteName() {
+		b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Name))
+	} else {
+		b.WriteString(m.Name)
+	}
+	b.WriteString(m.Type.String())
+	b.Write(strconv.AppendQuote(b.AvailableBuffer(), m.Value))
+
+	return b.String()
+}
+
+func (m *Matcher) shouldQuoteName() bool {
+	for i, c := range m.Name {
+		if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (i > 0 && c >= '0' && c <= '9') {
+			continue
+		}
+		return true
+	}
+	return len(m.Name) == 0
+}
+
+// Matches returns whether the matcher matches the given string value.
+func (m *Matcher) Matches(s string) bool {
+	switch m.Type {
+	case MatchEqual:
+		return s == m.Value
+	case MatchNotEqual:
+		return s != m.Value
+	case MatchRegexp:
+		return m.re.MatchString(s)
+	case MatchNotRegexp:
+		return !m.re.MatchString(s)
+	}
+	panic("labels.Matcher.Matches: invalid match type")
+}
+
+// Inverse returns a matcher that matches the opposite.
+func (m *Matcher) Inverse() (*Matcher, error) {
+	switch m.Type {
+	case MatchEqual:
+		return NewMatcher(MatchNotEqual, m.Name, m.Value)
+	case MatchNotEqual:
+		return NewMatcher(MatchEqual, m.Name, m.Value)
+	case MatchRegexp:
+		return NewMatcher(MatchNotRegexp, m.Name, m.Value)
+	case MatchNotRegexp:
+		return NewMatcher(MatchRegexp, m.Name, m.Value)
+	}
+	panic("labels.Matcher.Matches: invalid match type")
+}
+
+// GetRegexString returns the regex string.
+func (m *Matcher) GetRegexString() string {
+	if m.re == nil {
+		return ""
+	}
+	return m.re.GetRegexString()
+}
+
+// SetMatches returns a set of equality matchers for the current regex matchers if possible.
+// For example, the regexp `a(b|f)` returns "ab" and "af".
+// Returns nil if we can't replace the regexp by only equality matchers.
+func (m *Matcher) SetMatches() []string {
+	if m.re == nil {
+		return nil
+	}
+	return m.re.SetMatches()
+}
+
+// Prefix returns the required prefix of the value to match, if possible.
+// It will be empty if it's an equality matcher or if the prefix can't be determined.
+func (m *Matcher) Prefix() string {
+	if m.re == nil {
+		return ""
+	}
+	return m.re.prefix
+}
+
+// IsRegexOptimized returns whether the regex is optimized.
+func (m *Matcher) IsRegexOptimized() bool {
+	if m.re == nil {
+		return false
+	}
+	return m.re.IsOptimized()
+}
diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go
new file mode 100644
index 00000000..3df94351
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go
@@ -0,0 +1,1086 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "slices" + "strings" + "unicode" + "unicode/utf8" + + "github.com/grafana/regexp" + "github.com/grafana/regexp/syntax" + "golang.org/x/text/unicode/norm" +) + +const ( + maxSetMatches = 256 + + // The minimum number of alternate values a regex should have to trigger + // the optimization done by optimizeEqualOrPrefixStringMatchers() and so use a map + // to match values instead of iterating over a list. This value has + // been computed running BenchmarkOptimizeEqualStringMatchers. + minEqualMultiStringMatcherMapThreshold = 16 +) + +type FastRegexMatcher struct { + // Under some conditions, re is nil because the expression is never parsed. + // We store the original string to be able to return it in GetRegexString(). + reString string + re *regexp.Regexp + + setMatches []string + stringMatcher StringMatcher + prefix string + suffix string + contains []string + + // matchString is the "compiled" function to run by MatchString(). + matchString func(string) bool +} + +func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { + m := &FastRegexMatcher{ + reString: v, + } + + m.stringMatcher, m.setMatches = optimizeAlternatingLiterals(v) + if m.stringMatcher != nil { + // If we already have a string matcher, we don't need to parse the regex + // or compile the matchString function. This also avoids the behavior in + // compileMatchStringFunction where it prefers to use setMatches when + // available, even if the string matcher is faster. + m.matchString = m.stringMatcher.Matches + } else { + parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL) + if err != nil { + return nil, err + } + // Simplify the syntax tree to run faster. + parsed = parsed.Simplify() + m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$") + if err != nil { + return nil, err + } + if parsed.Op == syntax.OpConcat { + m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed) + } + if matches, caseSensitive := findSetMatches(parsed); caseSensitive { + m.setMatches = matches + } + m.stringMatcher = stringMatcherFromRegexp(parsed) + m.matchString = m.compileMatchStringFunction() + } + + return m, nil +} + +// compileMatchStringFunction returns the function to run by MatchString(). +func (m *FastRegexMatcher) compileMatchStringFunction() func(string) bool { + // If the only optimization available is the string matcher, then we can just run it. 
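+	// Otherwise the returned closure applies the available checks in order:
+	// set matches, prefix, suffix, ordered "contains" substrings, the string
+	// matcher and, as a last resort, the compiled regexp.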
+	if len(m.setMatches) == 0 && m.prefix == "" && m.suffix == "" && len(m.contains) == 0 && m.stringMatcher != nil {
+		return m.stringMatcher.Matches
+	}
+
+	return func(s string) bool {
+		if len(m.setMatches) != 0 {
+			for _, match := range m.setMatches {
+				if match == s {
+					return true
+				}
+			}
+			return false
+		}
+		if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
+			return false
+		}
+		if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
+			return false
+		}
+		if len(m.contains) > 0 && !containsInOrder(s, m.contains) {
+			return false
+		}
+		if m.stringMatcher != nil {
+			return m.stringMatcher.Matches(s)
+		}
+		return m.re.MatchString(s)
+	}
+}
+
+// IsOptimized returns true if any fast-path optimization is applied to the
+// regex matcher.
+func (m *FastRegexMatcher) IsOptimized() bool {
+	return len(m.setMatches) > 0 || m.stringMatcher != nil || m.prefix != "" || m.suffix != "" || len(m.contains) > 0
+}
+
+// findSetMatches extracts equality matches from a regexp.
+// Returns nil if we can't replace the regexp with only equality matchers, or if the regexp
+// contains a mix of case-sensitive and case-insensitive matchers.
+func findSetMatches(re *syntax.Regexp) (matches []string, caseSensitive bool) {
+	clearBeginEndText(re)
+
+	return findSetMatchesInternal(re, "")
+}
+
+func findSetMatchesInternal(re *syntax.Regexp, base string) (matches []string, caseSensitive bool) {
+	switch re.Op {
+	case syntax.OpBeginText:
+		// Correctly handling the begin text operator inside a regex is tricky,
+		// so in this case we fall back to the regex engine.
+		return nil, false
+	case syntax.OpEndText:
+		// Correctly handling the end text operator inside a regex is tricky,
+		// so in this case we fall back to the regex engine.
+		return nil, false
+	case syntax.OpLiteral:
+		return []string{base + string(re.Rune)}, isCaseSensitive(re)
+	case syntax.OpEmptyMatch:
+		if base != "" {
+			return []string{base}, isCaseSensitive(re)
+		}
+	case syntax.OpAlternate:
+		return findSetMatchesFromAlternate(re, base)
+	case syntax.OpCapture:
+		clearCapture(re)
+		return findSetMatchesInternal(re, base)
+	case syntax.OpConcat:
+		return findSetMatchesFromConcat(re, base)
+	case syntax.OpCharClass:
+		if len(re.Rune)%2 != 0 {
+			return nil, false
+		}
+		var matches []string
+		var totalSet int
+		for i := 0; i+1 < len(re.Rune); i += 2 {
+			totalSet += int(re.Rune[i+1]-re.Rune[i]) + 1
+		}
+		// Limit the total characters that can be used to create matches.
+		// In some cases, like negation [^0-9], a lot of possibilities exist, and that
+		// can create thousands of possible matches, at which point we're better off using the regexp.
+		if totalSet > maxSetMatches {
+			return nil, false
+		}
+		for i := 0; i+1 < len(re.Rune); i += 2 {
+			lo, hi := re.Rune[i], re.Rune[i+1]
+			for c := lo; c <= hi; c++ {
+				matches = append(matches, base+string(c))
+			}
+		}
+		return matches, isCaseSensitive(re)
+	default:
+		return nil, false
+	}
+	return nil, false
+}
+
+func findSetMatchesFromConcat(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
+	if len(re.Sub) == 0 {
+		return nil, false
+	}
+	clearCapture(re.Sub...)
+
+	matches = []string{base}
+
+	for i := 0; i < len(re.Sub); i++ {
+		var newMatches []string
+		for j, b := range matches {
+			m, caseSensitive := findSetMatchesInternal(re.Sub[i], b)
+			if m == nil {
+				return nil, false
+			}
+			if tooManyMatches(newMatches, m...) {
+				return nil, false
+			}
+
+			// All matches must have the same case sensitivity. If it's the first set of matches
+			// returned, we store its sensitivity as the expected case, and then we'll check all
+			// other ones.
+			if i == 0 && j == 0 {
+				matchesCaseSensitive = caseSensitive
+			}
+			if matchesCaseSensitive != caseSensitive {
+				return nil, false
+			}
+
+			newMatches = append(newMatches, m...)
+		}
+		matches = newMatches
+	}
+
+	return matches, matchesCaseSensitive
+}
+
+func findSetMatchesFromAlternate(re *syntax.Regexp, base string) (matches []string, matchesCaseSensitive bool) {
+	for i, sub := range re.Sub {
+		found, caseSensitive := findSetMatchesInternal(sub, base)
+		if found == nil {
+			return nil, false
+		}
+		if tooManyMatches(matches, found...) {
+			return nil, false
+		}
+
+		// All matches must have the same case sensitivity. If it's the first set of matches
+		// returned, we store its sensitivity as the expected case, and then we'll check all
+		// other ones.
+		if i == 0 {
+			matchesCaseSensitive = caseSensitive
+		}
+		if matchesCaseSensitive != caseSensitive {
+			return nil, false
+		}
+
+		matches = append(matches, found...)
+	}
+
+	return matches, matchesCaseSensitive
+}
+
+// clearCapture removes capture operations, as they are not used for matching.
+func clearCapture(regs ...*syntax.Regexp) {
+	for _, r := range regs {
+		// Iterate on the regexp because capture groups could be nested.
+		for r.Op == syntax.OpCapture {
+			*r = *r.Sub[0]
+		}
+	}
+}
+
+// clearBeginEndText removes the begin and end text from the regexp. Prometheus regexps are anchored to the beginning and end of the string.
+func clearBeginEndText(re *syntax.Regexp) {
+	// Do not clear begin/end text from an alternate operator because it could
+	// change the actual regexp properties.
+	if re.Op == syntax.OpAlternate {
+		return
+	}
+
+	if len(re.Sub) == 0 {
+		return
+	}
+	if len(re.Sub) == 1 {
+		if re.Sub[0].Op == syntax.OpBeginText || re.Sub[0].Op == syntax.OpEndText {
+			// We need to remove this element. Since it's the only one, we convert into a matcher of an empty string.
+			// OpEmptyMatch is regexp's nop operator.
+			re.Op = syntax.OpEmptyMatch
+			re.Sub = nil
+			return
+		}
+	}
+	if re.Sub[0].Op == syntax.OpBeginText {
+		re.Sub = re.Sub[1:]
+	}
+	if re.Sub[len(re.Sub)-1].Op == syntax.OpEndText {
+		re.Sub = re.Sub[:len(re.Sub)-1]
+	}
+}
+
+// isCaseInsensitive tells if a regexp is case insensitive.
+// The flag should be checked at each level of the syntax tree.
+func isCaseInsensitive(reg *syntax.Regexp) bool {
+	return (reg.Flags & syntax.FoldCase) != 0
+}
+
+// isCaseSensitive tells if a regexp is case sensitive.
+// The flag should be checked at each level of the syntax tree.
+func isCaseSensitive(reg *syntax.Regexp) bool {
+	return !isCaseInsensitive(reg)
+}
+
+// tooManyMatches guards against creating too many set matches.
+func tooManyMatches(matches []string, added ...string) bool {
+	return len(matches)+len(added) > maxSetMatches
+}
+
+func (m *FastRegexMatcher) MatchString(s string) bool {
+	return m.matchString(s)
+}
+
+func (m *FastRegexMatcher) SetMatches() []string {
+	// IMPORTANT: always return a copy, otherwise if the caller manipulates this slice it will
+	// also get manipulated in the cached FastRegexMatcher instance.
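+	// For example (illustrative), a matcher built from `api|web` returns a
+	// copy of ["api", "web"] here.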
+	return slices.Clone(m.setMatches)
+}
+
+func (m *FastRegexMatcher) GetRegexString() string {
+	return m.reString
+}
+
+// optimizeAlternatingLiterals optimizes a regex of the form
+//
+//	`literal1|literal2|literal3|...`
+//
+// This function returns an optimized StringMatcher or nil if the regex
+// cannot be optimized in this way, and a list of setMatches up to maxSetMatches.
+func optimizeAlternatingLiterals(s string) (StringMatcher, []string) {
+	if len(s) == 0 {
+		return emptyStringMatcher{}, nil
+	}
+
+	estimatedAlternates := strings.Count(s, "|") + 1
+
+	// If there are no alternates, check if the string is a literal.
+	if estimatedAlternates == 1 {
+		if regexp.QuoteMeta(s) == s {
+			return &equalStringMatcher{s: s, caseSensitive: true}, []string{s}
+		}
+		return nil, nil
+	}
+
+	multiMatcher := newEqualMultiStringMatcher(true, estimatedAlternates, 0, 0)
+
+	for end := strings.IndexByte(s, '|'); end > -1; end = strings.IndexByte(s, '|') {
+		// Split the string into the next literal and the remainder.
+		subMatch := s[:end]
+		s = s[end+1:]
+
+		// Break if any of the submatches are not literals.
+		if regexp.QuoteMeta(subMatch) != subMatch {
+			return nil, nil
+		}
+
+		multiMatcher.add(subMatch)
+	}
+
+	// Break if the remainder is not a literal.
+	if regexp.QuoteMeta(s) != s {
+		return nil, nil
+	}
+	multiMatcher.add(s)
+
+	return multiMatcher, multiMatcher.setMatches()
+}
+
+// optimizeConcatRegex returns literal prefix/suffix text that can be safely
+// checked against the label value before running the regexp matcher.
+func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix string, contains []string) {
+	sub := r.Sub
+	clearCapture(sub...)
+
+	// We can safely remove begin and end text matchers respectively
+	// at the beginning and end of the regexp.
+	if len(sub) > 0 && sub[0].Op == syntax.OpBeginText {
+		sub = sub[1:]
+	}
+	if len(sub) > 0 && sub[len(sub)-1].Op == syntax.OpEndText {
+		sub = sub[:len(sub)-1]
+	}
+
+	if len(sub) == 0 {
+		return
+	}
+
+	// Given Prometheus regex matchers are always anchored to the begin/end
+	// of the text, if the first/last operations are literals, we can safely
+	// treat them as prefix/suffix.
+	if sub[0].Op == syntax.OpLiteral && (sub[0].Flags&syntax.FoldCase) == 0 {
+		prefix = string(sub[0].Rune)
+	}
+	if last := len(sub) - 1; sub[last].Op == syntax.OpLiteral && (sub[last].Flags&syntax.FoldCase) == 0 {
+		suffix = string(sub[last].Rune)
+	}
+
+	// If the regexp contains any literal which is not a prefix/suffix, we keep track of
+	// all the ones which are case-sensitive.
+	for i := 1; i < len(sub)-1; i++ {
+		if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 {
+			contains = append(contains, string(sub[i].Rune))
+		}
+	}
+
+	return
+}
+
+// StringMatcher is a matcher that matches a string in place of a regular expression.
+type StringMatcher interface {
+	Matches(s string) bool
+}
+
+// stringMatcherFromRegexp attempts to replace a common regexp with a string matcher.
+// It returns nil if the regexp is not supported.
+func stringMatcherFromRegexp(re *syntax.Regexp) StringMatcher {
+	clearBeginEndText(re)
+
+	m := stringMatcherFromRegexpInternal(re)
+	m = optimizeEqualOrPrefixStringMatchers(m, minEqualMultiStringMatcherMapThreshold)
+
+	return m
+}
+
+func stringMatcherFromRegexpInternal(re *syntax.Regexp) StringMatcher {
+	clearCapture(re)
+
+	switch re.Op {
+	case syntax.OpBeginText:
+		// Correctly handling the begin text operator inside a regex is tricky,
+		// so in this case we fall back to the regex engine.
+		return nil
+	case syntax.OpEndText:
+		// Correctly handling the end text operator inside a regex is tricky,
+		// so in this case we fall back to the regex engine.
+		return nil
+	case syntax.OpPlus:
+		if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
+			return nil
+		}
+		return &anyNonEmptyStringMatcher{
+			matchNL: re.Sub[0].Op == syntax.OpAnyChar,
+		}
+	case syntax.OpStar:
+		if re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL {
+			return nil
+		}
+
+		// If the newline is valid, then this matcher literally matches any string (even an empty one).
+		if re.Sub[0].Op == syntax.OpAnyChar {
+			return trueMatcher{}
+		}
+
+		// Any string is fine (including an empty one), as long as it doesn't contain any newline.
+		return anyStringWithoutNewlineMatcher{}
+	case syntax.OpQuest:
+		// Only optimize for ".?".
+		if len(re.Sub) != 1 || (re.Sub[0].Op != syntax.OpAnyChar && re.Sub[0].Op != syntax.OpAnyCharNotNL) {
+			return nil
+		}
+
+		return &zeroOrOneCharacterStringMatcher{
+			matchNL: re.Sub[0].Op == syntax.OpAnyChar,
+		}
+	case syntax.OpEmptyMatch:
+		return emptyStringMatcher{}
+
+	case syntax.OpLiteral:
+		return &equalStringMatcher{
+			s:             string(re.Rune),
+			caseSensitive: !isCaseInsensitive(re),
+		}
+	case syntax.OpAlternate:
+		or := make([]StringMatcher, 0, len(re.Sub))
+		for _, sub := range re.Sub {
+			m := stringMatcherFromRegexpInternal(sub)
+			if m == nil {
+				return nil
+			}
+			or = append(or, m)
+		}
+		return orStringMatcher(or)
+	case syntax.OpConcat:
+		clearCapture(re.Sub...)
+
+		if len(re.Sub) == 0 {
+			return emptyStringMatcher{}
+		}
+		if len(re.Sub) == 1 {
+			return stringMatcherFromRegexpInternal(re.Sub[0])
+		}
+
+		var left, right StringMatcher
+
+		// Let's try to find if there are first and last any-matchers.
+		if re.Sub[0].Op == syntax.OpPlus || re.Sub[0].Op == syntax.OpStar || re.Sub[0].Op == syntax.OpQuest {
+			left = stringMatcherFromRegexpInternal(re.Sub[0])
+			if left == nil {
+				return nil
+			}
+			re.Sub = re.Sub[1:]
+		}
+		if re.Sub[len(re.Sub)-1].Op == syntax.OpPlus || re.Sub[len(re.Sub)-1].Op == syntax.OpStar || re.Sub[len(re.Sub)-1].Op == syntax.OpQuest {
+			right = stringMatcherFromRegexpInternal(re.Sub[len(re.Sub)-1])
+			if right == nil {
+				return nil
+			}
+			re.Sub = re.Sub[:len(re.Sub)-1]
+		}
+
+		matches, matchesCaseSensitive := findSetMatchesInternal(re, "")
+
+		if len(matches) == 0 && len(re.Sub) == 2 {
+			// We have not found any fixed set matches. We look for other known cases that
+			// we can optimize.
+			switch {
+			// Prefix is literal.
+			case right == nil && re.Sub[0].Op == syntax.OpLiteral:
+				right = stringMatcherFromRegexpInternal(re.Sub[1])
+				if right != nil {
+					matches = []string{string(re.Sub[0].Rune)}
+					matchesCaseSensitive = !isCaseInsensitive(re.Sub[0])
+				}
+
+			// Suffix is literal.
+			case left == nil && re.Sub[1].Op == syntax.OpLiteral:
+				left = stringMatcherFromRegexpInternal(re.Sub[0])
+				if left != nil {
+					matches = []string{string(re.Sub[1].Rune)}
+					matchesCaseSensitive = !isCaseInsensitive(re.Sub[1])
+				}
+			}
+		}
+
+		// Ensure we've found some literals to match (optionally with a left and/or right matcher).
+		// If not, then this optimization doesn't trigger.
+		if len(matches) == 0 {
+			return nil
+		}
+
+		// Use the right (and best) matcher based on what we've found.
+		switch {
+		// No left and right matchers (only fixed set matches).
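+		// For example (illustrative), `xx(a|b)` reaches this switch with
+		// matches=["xxa", "xxb"] and no left/right matchers.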
+		case left == nil && right == nil:
+			// If there are no any-matchers on either side, it's a concatenation of literals.
+			or := make([]StringMatcher, 0, len(matches))
+			for _, match := range matches {
+				or = append(or, &equalStringMatcher{
+					s:             match,
+					caseSensitive: matchesCaseSensitive,
+				})
+			}
+			return orStringMatcher(or)
+
+		// Right matcher with 1 fixed set match.
+		case left == nil && len(matches) == 1:
+			return newLiteralPrefixStringMatcher(matches[0], matchesCaseSensitive, right)
+
+		// Left matcher with 1 fixed set match.
+		case right == nil && len(matches) == 1:
+			return &literalSuffixStringMatcher{
+				left:                left,
+				suffix:              matches[0],
+				suffixCaseSensitive: matchesCaseSensitive,
+			}
+
+		// We found literals in the middle. We can trigger the fast path only if
+		// the matches are case-sensitive, because containsStringMatcher doesn't
+		// support case-insensitive matching.
+		case matchesCaseSensitive:
+			return &containsStringMatcher{
+				substrings: matches,
+				left:       left,
+				right:      right,
+			}
+		}
+	}
+	return nil
+}
+
+// containsStringMatcher matches a string if it contains any of the substrings.
+// If left and right are not nil, it's a contains operation where left and right must match.
+// If left is nil, it's a hasPrefix operation and right must match.
+// Finally, if right is nil it's a hasSuffix operation and left must match.
+type containsStringMatcher struct {
+	// The matcher that must match the left side. Can be nil.
+	left StringMatcher
+
+	// At least one of these strings must match in the "middle", between left and right matchers.
+	substrings []string
+
+	// The matcher that must match the right side. Can be nil.
+	right StringMatcher
+}
+
+func (m *containsStringMatcher) Matches(s string) bool {
+	for _, substr := range m.substrings {
+		switch {
+		case m.right != nil && m.left != nil:
+			searchStartPos := 0
+
+			for {
+				pos := strings.Index(s[searchStartPos:], substr)
+				if pos < 0 {
+					break
+				}
+
+				// Since we started searching from searchStartPos, we have to add that offset
+				// to get the actual position of the substring inside the text.
+				pos += searchStartPos
+
+				// If both the left and right matchers match, then we can stop searching because
+				// we've found a match.
+				if m.left.Matches(s[:pos]) && m.right.Matches(s[pos+len(substr):]) {
+					return true
+				}
+
+				// Continue searching for another occurrence of the substring inside the text.
+				searchStartPos = pos + 1
+			}
+		case m.left != nil:
+			// If we have to check for characters on the left then we need to match a suffix.
+			if strings.HasSuffix(s, substr) && m.left.Matches(s[:len(s)-len(substr)]) {
+				return true
+			}
+		case m.right != nil:
+			if strings.HasPrefix(s, substr) && m.right.Matches(s[len(substr):]) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func newLiteralPrefixStringMatcher(prefix string, prefixCaseSensitive bool, right StringMatcher) StringMatcher {
+	if prefixCaseSensitive {
+		return &literalPrefixSensitiveStringMatcher{
+			prefix: prefix,
+			right:  right,
+		}
+	}
+
+	return &literalPrefixInsensitiveStringMatcher{
+		prefix: prefix,
+		right:  right,
+	}
+}
+
+// literalPrefixSensitiveStringMatcher matches a string with the given literal case-sensitive prefix and right side matcher.
+type literalPrefixSensitiveStringMatcher struct {
+	prefix string
+
+	// The matcher that must match the right side. Can be nil.
+	right StringMatcher
+}
+
+func (m *literalPrefixSensitiveStringMatcher) Matches(s string) bool {
+	if !strings.HasPrefix(s, m.prefix) {
+		return false
+	}
+
+	// Ensure the right side matches.
+	return m.right.Matches(s[len(m.prefix):])
+}
+
+// literalPrefixInsensitiveStringMatcher matches a string with the given literal case-insensitive prefix and right side matcher.
+type literalPrefixInsensitiveStringMatcher struct {
+	prefix string
+
+	// The matcher that must match the right side. Can be nil.
+	right StringMatcher
+}
+
+func (m *literalPrefixInsensitiveStringMatcher) Matches(s string) bool {
+	if !hasPrefixCaseInsensitive(s, m.prefix) {
+		return false
+	}
+
+	// Ensure the right side matches.
+	return m.right.Matches(s[len(m.prefix):])
+}
+
+// literalSuffixStringMatcher matches a string with the given literal suffix and left side matcher.
+type literalSuffixStringMatcher struct {
+	// The matcher that must match the left side. Can be nil.
+	left StringMatcher
+
+	suffix              string
+	suffixCaseSensitive bool
+}
+
+func (m *literalSuffixStringMatcher) Matches(s string) bool {
+	// Ensure the suffix matches.
+	if m.suffixCaseSensitive && !strings.HasSuffix(s, m.suffix) {
+		return false
+	}
+	if !m.suffixCaseSensitive && !hasSuffixCaseInsensitive(s, m.suffix) {
+		return false
+	}
+
+	// Ensure the left side matches.
+	return m.left.Matches(s[:len(s)-len(m.suffix)])
+}
+
+// emptyStringMatcher matches an empty string.
+type emptyStringMatcher struct{}
+
+func (m emptyStringMatcher) Matches(s string) bool {
+	return len(s) == 0
+}
+
+// orStringMatcher matches any of the sub-matchers.
+type orStringMatcher []StringMatcher
+
+func (m orStringMatcher) Matches(s string) bool {
+	for _, matcher := range m {
+		if matcher.Matches(s) {
+			return true
+		}
+	}
+	return false
+}
+
+// equalStringMatcher matches a string exactly and supports case-insensitive matching.
+type equalStringMatcher struct {
+	s             string
+	caseSensitive bool
+}
+
+func (m *equalStringMatcher) Matches(s string) bool {
+	if m.caseSensitive {
+		return m.s == s
+	}
+	return strings.EqualFold(m.s, s)
+}
+
+type multiStringMatcherBuilder interface {
+	StringMatcher
+	add(s string)
+	addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher)
+	setMatches() []string
+}
+
+func newEqualMultiStringMatcher(caseSensitive bool, estimatedSize, estimatedPrefixes, minPrefixLength int) multiStringMatcherBuilder {
+	// If the estimated size is low enough, it's faster to use a slice instead of a map.
+	if estimatedSize < minEqualMultiStringMatcherMapThreshold && estimatedPrefixes == 0 {
+		return &equalMultiStringSliceMatcher{caseSensitive: caseSensitive, values: make([]string, 0, estimatedSize)}
+	}
+
+	return &equalMultiStringMapMatcher{
+		values:        make(map[string]struct{}, estimatedSize),
+		prefixes:      make(map[string][]StringMatcher, estimatedPrefixes),
+		minPrefixLen:  minPrefixLength,
+		caseSensitive: caseSensitive,
+	}
+}
+
+// equalMultiStringSliceMatcher matches a string exactly against a slice of valid values.
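+// It is what newEqualMultiStringMatcher returns when the estimated number of
+// values is below minEqualMultiStringMatcherMapThreshold and there are no
+// prefix matchers: for small sets a linear scan is cheaper than a map lookup.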
+type equalMultiStringSliceMatcher struct {
+	values []string
+
+	caseSensitive bool
+}
+
+func (m *equalMultiStringSliceMatcher) add(s string) {
+	m.values = append(m.values, s)
+}
+
+func (m *equalMultiStringSliceMatcher) addPrefix(_ string, _ bool, _ StringMatcher) {
+	panic("not implemented")
+}
+
+func (m *equalMultiStringSliceMatcher) setMatches() []string {
+	return m.values
+}
+
+func (m *equalMultiStringSliceMatcher) Matches(s string) bool {
+	if m.caseSensitive {
+		for _, v := range m.values {
+			if s == v {
+				return true
+			}
+		}
+	} else {
+		for _, v := range m.values {
+			if strings.EqualFold(s, v) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// equalMultiStringMapMatcher matches a string exactly against a map of valid values
+// or against a set of prefix matchers.
+type equalMultiStringMapMatcher struct {
+	// values contains values to match a string against. If the matching is case insensitive,
+	// the values here must be lowercase.
+	values map[string]struct{}
+	// prefixes maps strings, all of length minPrefixLen, to sets of matchers to check the rest of the string.
+	// If the matching is case insensitive, prefixes are all lowercase.
+	prefixes map[string][]StringMatcher
+	// minPrefixLen can be zero, meaning there are no prefix matchers.
+	minPrefixLen  int
+	caseSensitive bool
+}
+
+func (m *equalMultiStringMapMatcher) add(s string) {
+	if !m.caseSensitive {
+		s = toNormalisedLower(s)
+	}
+
+	m.values[s] = struct{}{}
+}
+
+func (m *equalMultiStringMapMatcher) addPrefix(prefix string, prefixCaseSensitive bool, matcher StringMatcher) {
+	if m.minPrefixLen == 0 {
+		panic("addPrefix called when no prefix length defined")
+	}
+	if len(prefix) < m.minPrefixLen {
+		panic("addPrefix called with a too short prefix")
+	}
+	if m.caseSensitive != prefixCaseSensitive {
+		panic("addPrefix called with a prefix whose case sensitivity is different than the expected one")
+	}
+
+	s := prefix[:m.minPrefixLen]
+	if !m.caseSensitive {
+		s = strings.ToLower(s)
+	}
+
+	m.prefixes[s] = append(m.prefixes[s], matcher)
+}
+
+func (m *equalMultiStringMapMatcher) setMatches() []string {
+	if len(m.values) >= maxSetMatches || len(m.prefixes) > 0 {
+		return nil
+	}
+
+	matches := make([]string, 0, len(m.values))
+	for s := range m.values {
+		matches = append(matches, s)
+	}
+	return matches
+}
+
+func (m *equalMultiStringMapMatcher) Matches(s string) bool {
+	if !m.caseSensitive {
+		s = toNormalisedLower(s)
+	}
+
+	if _, ok := m.values[s]; ok {
+		return true
+	}
+	if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen {
+		for _, matcher := range m.prefixes[s[:m.minPrefixLen]] {
+			if matcher.Matches(s) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// toNormalisedLower normalises the input string using Unicode Normalization Form KD
+// (norm.NFKD, matching the code below) and then converts it to lower case.
+func toNormalisedLower(s string) string {
+	var buf []byte
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c >= utf8.RuneSelf {
+			return strings.Map(unicode.ToLower, norm.NFKD.String(s))
+		}
+		if 'A' <= c && c <= 'Z' {
+			if buf == nil {
+				buf = []byte(s)
+			}
+			buf[i] = c + 'a' - 'A'
+		}
+	}
+	if buf == nil {
+		return s
+	}
+	return yoloString(buf)
+}
+
+// anyStringWithoutNewlineMatcher is a stringMatcher which matches any string
+// (including an empty one) as long as it doesn't contain any newline character.
+type anyStringWithoutNewlineMatcher struct{}
+
+func (m anyStringWithoutNewlineMatcher) Matches(s string) bool {
+	// We need to make sure it doesn't contain a newline. Since the newline is
+	// an ASCII character, we can use strings.IndexByte().
+	return strings.IndexByte(s, '\n') == -1
+}
+
+// anyNonEmptyStringMatcher is a stringMatcher which matches any non-empty string.
+type anyNonEmptyStringMatcher struct {
+	matchNL bool
+}
+
+func (m *anyNonEmptyStringMatcher) Matches(s string) bool {
+	if m.matchNL {
+		// It's OK if the string contains a newline, so we just need to make
+		// sure it's non-empty.
+		return len(s) > 0
+	}
+
+	// We need to make sure it is non-empty and doesn't contain a newline.
+	// Since the newline is an ASCII character, we can use strings.IndexByte().
+	return len(s) > 0 && strings.IndexByte(s, '\n') == -1
+}
+
+// zeroOrOneCharacterStringMatcher is a StringMatcher which matches zero or one occurrence
+// of any character. The newline character is matched only if matchNL is set to true.
+type zeroOrOneCharacterStringMatcher struct {
+	matchNL bool
+}
+
+func (m *zeroOrOneCharacterStringMatcher) Matches(s string) bool {
+	// If there's more than one rune in the string, then it can't match.
+	if r, size := utf8.DecodeRuneInString(s); r == utf8.RuneError {
+		// Size is 0 for empty strings, 1 for invalid rune.
+		// Empty string matches, invalid rune matches if there isn't anything else.
+		return size == len(s)
+	} else if size < len(s) {
+		return false
+	}
+
+	// No need to check for the newline if the string is empty or matching a newline is OK.
+	if m.matchNL || len(s) == 0 {
+		return true
+	}
+
+	return s[0] != '\n'
+}
+
+// trueMatcher is a stringMatcher which matches any string (always returns true).
+type trueMatcher struct{}
+
+func (m trueMatcher) Matches(_ string) bool {
+	return true
+}
+
+// optimizeEqualOrPrefixStringMatchers optimizes a specific case where all matchers are made of an
+// alternation (orStringMatcher) of strings checked for equality (equalStringMatcher) or
+// with a literal prefix (literalPrefixSensitiveStringMatcher or literalPrefixInsensitiveStringMatcher).
+//
+// In this specific case, when we have many strings to match against, we can use a map instead
+// of iterating over the list of strings.
+func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) StringMatcher {
+	var (
+		caseSensitive    bool
+		caseSensitiveSet bool
+		numValues        int
+		numPrefixes      int
+		minPrefixLength  int
+	)
+
+	// Analyse the input StringMatcher to count the number of occurrences
+	// and ensure all of them have the same case sensitivity.
+	analyseEqualMatcherCallback := func(matcher *equalStringMatcher) bool {
+		// Ensure we don't have mixed case sensitivity.
+		if caseSensitiveSet && caseSensitive != matcher.caseSensitive {
+			return false
+		} else if !caseSensitiveSet {
+			caseSensitive = matcher.caseSensitive
+			caseSensitiveSet = true
+		}
+
+		numValues++
+		return true
+	}
+
+	analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
+		// Ensure we don't have mixed case sensitivity.
+		if caseSensitiveSet && caseSensitive != prefixCaseSensitive {
+			return false
+		} else if !caseSensitiveSet {
+			caseSensitive = prefixCaseSensitive
+			caseSensitiveSet = true
+		}
+		if numPrefixes == 0 || len(prefix) < minPrefixLength {
+			minPrefixLength = len(prefix)
+		}
+
+		numPrefixes++
+		return true
+	}
+
+	if !findEqualOrPrefixStringMatchers(input, analyseEqualMatcherCallback, analysePrefixMatcherCallback) {
+		return input
+	}
+
+	// If the number of values and prefixes found is less than the threshold, then we should skip the optimization.
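+	// For example (illustrative), an alternation yielding only two equality
+	// matchers is left as-is, while one yielding 20 is converted to a
+	// map-based matcher.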
+	if (numValues + numPrefixes) < threshold {
+		return input
+	}
+
+	// Parse the input StringMatcher again to extract and store all values.
+	// We can skip the case sensitivity check because we've already checked it, and
+	// if the code reaches this point then it means all matchers have the same case sensitivity.
+	multiMatcher := newEqualMultiStringMatcher(caseSensitive, numValues, numPrefixes, minPrefixLength)
+
+	// Ignore the return value because we already iterated over the input StringMatcher
+	// and it was all good.
+	findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool {
+		multiMatcher.add(matcher.s)
+		return true
+	}, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool {
+		multiMatcher.addPrefix(prefix, caseSensitive, matcher)
+		return true
+	})
+
+	return multiMatcher
+}
+
+// findEqualOrPrefixStringMatchers analyzes the input StringMatcher and calls the equalMatcherCallback for each
+// equalStringMatcher found, and prefixMatcherCallback for each literalPrefixSensitiveStringMatcher and literalPrefixInsensitiveStringMatcher found.
+//
+// Returns true if and only if the input StringMatcher is *only* composed of an alternation of equalStringMatcher and/or
+// literal prefix matchers. Returns false if prefixMatcherCallback is nil and a literal prefix matcher is encountered.
+func findEqualOrPrefixStringMatchers(input StringMatcher, equalMatcherCallback func(matcher *equalStringMatcher) bool, prefixMatcherCallback func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool) bool {
+	orInput, ok := input.(orStringMatcher)
+	if !ok {
+		return false
+	}
+
+	for _, m := range orInput {
+		switch casted := m.(type) {
+		case orStringMatcher:
+			if !findEqualOrPrefixStringMatchers(m, equalMatcherCallback, prefixMatcherCallback) {
+				return false
+			}
+
+		case *equalStringMatcher:
+			if !equalMatcherCallback(casted) {
+				return false
+			}
+
+		case *literalPrefixSensitiveStringMatcher:
+			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, true, casted) {
+				return false
+			}
+
+		case *literalPrefixInsensitiveStringMatcher:
+			if prefixMatcherCallback == nil || !prefixMatcherCallback(casted.prefix, false, casted) {
+				return false
+			}
+
+		default:
+			// It's not an equal or prefix string matcher, so we have to stop searching
+			// because this optimization can't be applied.
+			return false
+		}
+	}
+
+	return true
+}
+
+func hasPrefixCaseInsensitive(s, prefix string) bool {
+	return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
+}
+
+func hasSuffixCaseInsensitive(s, suffix string) bool {
+	return len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix)
+}
+
+func containsInOrder(s string, contains []string) bool {
+	// Optimization for the case we only have to look for 1 substring.
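+	// A single substring can be checked with strings.Contains directly,
+	// avoiding the offset bookkeeping done by containsInOrderMulti below.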
+ if len(contains) == 1 { + return strings.Contains(s, contains[0]) + } + + return containsInOrderMulti(s, contains) +} + +func containsInOrderMulti(s string, contains []string) bool { + offset := 0 + + for _, substr := range contains { + at := strings.Index(s[offset:], substr) + if at == -1 { + return false + } + + offset += at + len(substr) + } + + return true +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding.go new file mode 100644 index 00000000..8b3a3693 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/labels/sharding.go @@ -0,0 +1,47 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !stringlabels && !dedupelabels + +package labels + +import ( + "github.com/cespare/xxhash/v2" +) + +// StableHash is a labels hashing implementation which is guaranteed to not change over time. +// This function should be used whenever labels hashing backward compatibility must be guaranteed. +func StableHash(ls Labels) uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + for i, v := range ls { + if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+ do not allocate whole entry. + h := xxhash.New() + _, _ = h.Write(b) + for _, v := range ls[i:] { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, v.Name...) + b = append(b, sep) + b = append(b, v.Value...) + b = append(b, sep) + } + return xxhash.Sum64(b) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go new file mode 100644 index 00000000..5bf41b05 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go @@ -0,0 +1,52 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build dedupelabels + +package labels + +import ( + "github.com/cespare/xxhash/v2" +) + +// StableHash is a labels hashing implementation which is guaranteed to not change over time. +// This function should be used whenever labels hashing backward compatibility must be guaranteed. +func StableHash(ls Labels) uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. 
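+	// The 1024-byte buffer below covers typical label sets; larger ones
+	// switch to the streaming hash inside the loop.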
+ b := make([]byte, 0, 1024) + for pos := 0; pos < len(ls.data); { + name, newPos := decodeString(ls.syms, ls.data, pos) + value, newPos := decodeString(ls.syms, ls.data, newPos) + if len(b)+len(name)+len(value)+2 >= cap(b) { + // If labels entry is 1KB+, hash the rest of them via Write(). + h := xxhash.New() + _, _ = h.Write(b) + for pos < len(ls.data) { + name, pos = decodeString(ls.syms, ls.data, pos) + value, pos = decodeString(ls.syms, ls.data, pos) + _, _ = h.WriteString(name) + _, _ = h.Write(seps) + _, _ = h.WriteString(value) + _, _ = h.Write(seps) + } + return h.Sum64() + } + + b = append(b, name...) + b = append(b, sep) + b = append(b, value...) + b = append(b, sep) + pos = newPos + } + return xxhash.Sum64(b) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go new file mode 100644 index 00000000..798f268e --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go @@ -0,0 +1,54 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build stringlabels + +package labels + +import ( + "github.com/cespare/xxhash/v2" +) + +// StableHash is a labels hashing implementation which is guaranteed to not change over time. +// This function should be used whenever labels hashing backward compatibility must be guaranteed. +func StableHash(ls Labels) uint64 { + // Use xxhash.Sum64(b) for fast path as it's faster. + b := make([]byte, 0, 1024) + var h *xxhash.Digest + for i := 0; i < len(ls.data); { + var v Label + v.Name, i = decodeString(ls.data, i) + v.Value, i = decodeString(ls.data, i) + if h == nil && len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) { + // If labels entry is 1KB+, switch to Write API. Copy in the values up to this point. + h = xxhash.New() + _, _ = h.Write(b) + } + if h != nil { + _, _ = h.WriteString(v.Name) + _, _ = h.Write(seps) + _, _ = h.WriteString(v.Value) + _, _ = h.Write(seps) + continue + } + + b = append(b, v.Name...) + b = append(b, sep) + b = append(b, v.Value...) + b = append(b, sep) + } + if h != nil { + return h.Sum64() + } + return xxhash.Sum64(b) +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go new file mode 100644 index 00000000..d060def4 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/labels/test_utils.go @@ -0,0 +1,87 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strings"
+)
+
+// Slice is a sortable slice of label sets.
+type Slice []Labels
+
+func (s Slice) Len() int           { return len(s) }
+func (s Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s Slice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 }
+
+// Selector holds constraints for matching against a label set.
+type Selector []*Matcher
+
+// Matches returns whether the labels satisfy all matchers.
+func (s Selector) Matches(labels Labels) bool {
+	for _, m := range s {
+		if v := labels.Get(m.Name); !m.Matches(v) {
+			return false
+		}
+	}
+	return true
+}
+
+// ReadLabels reads up to n label sets in a JSON formatted file fn. It is mostly useful
+// to load testing data.
+func ReadLabels(fn string, n int) ([]Labels, error) {
+	f, err := os.Open(fn)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	b := NewScratchBuilder(0)
+
+	var mets []Labels
+	hashes := map[uint64]struct{}{}
+	i := 0
+
+	for scanner.Scan() && i < n {
+		b.Reset()
+
+		r := strings.NewReplacer("\"", "", "{", "", "}", "")
+		s := r.Replace(scanner.Text())
+
+		labelChunks := strings.Split(s, ",")
+		for _, labelChunk := range labelChunks {
+			split := strings.Split(labelChunk, ":")
+			b.Add(split[0], split[1])
+		}
+		// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
+		b.Sort()
+		m := b.Labels()
+
+		h := m.Hash()
+		if _, ok := hashes[h]; ok {
+			continue
+		}
+		mets = append(mets, m)
+		hashes[h] = struct{}{}
+		i++
+	}
+
+	if i != n {
+		return mets, fmt.Errorf("requested %d metrics but found %d", n, i)
+	}
+	return mets, nil
+}
diff --git a/vendor/github.com/prometheus/prometheus/prompb/codec.go b/vendor/github.com/prometheus/prometheus/prompb/codec.go
new file mode 100644
index 00000000..ad30cd5e
--- /dev/null
+++ b/vendor/github.com/prometheus/prometheus/prompb/codec.go
@@ -0,0 +1,201 @@
+// Copyright 2024 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prompb
+
+import (
+	"strings"
+
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/histogram"
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+// NOTE(bwplotka): This file's code is tested in /prompb/rwcommon.
+
+// ToLabels returns model labels.Labels from timeseries' remote labels.
+func (m TimeSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels {
+	return labelProtosToLabels(b, m.GetLabels())
+}
+
+// ToLabels returns model labels.Labels from timeseries' remote labels.
+func (m ChunkedSeries) ToLabels(b *labels.ScratchBuilder, _ []string) labels.Labels { + return labelProtosToLabels(b, m.GetLabels()) +} + +func labelProtosToLabels(b *labels.ScratchBuilder, labelPairs []Label) labels.Labels { + b.Reset() + for _, l := range labelPairs { + b.Add(l.Name, l.Value) + } + b.Sort() + return b.Labels() +} + +// FromLabels transforms labels into prompb labels. The buffer slice +// will be used to avoid allocations if it is big enough to store the labels. +func FromLabels(lbls labels.Labels, buf []Label) []Label { + result := buf[:0] + lbls.Range(func(l labels.Label) { + result = append(result, Label{ + Name: l.Name, + Value: l.Value, + }) + }) + return result +} + +// FromMetadataType transforms a Prometheus metricType into prompb metricType. Since the former is a string we need to transform it to an enum. +func FromMetadataType(t model.MetricType) MetricMetadata_MetricType { + mt := strings.ToUpper(string(t)) + v, ok := MetricMetadata_MetricType_value[mt] + if !ok { + return MetricMetadata_UNKNOWN + } + return MetricMetadata_MetricType(v) +} + +// IsFloatHistogram returns true if the histogram is float. +func (h Histogram) IsFloatHistogram() bool { + _, ok := h.GetCount().(*Histogram_CountFloat) + return ok +} + +// ToIntHistogram returns integer Prometheus histogram from the remote implementation +// of integer histogram. If it's a float histogram, the method returns nil. +func (h Histogram) ToIntHistogram() *histogram.Histogram { + if h.IsFloatHistogram() { + return nil + } + return &histogram.Histogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountInt(), + Count: h.GetCountInt(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveDeltas(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeDeltas(), + } +} + +// ToFloatHistogram returns float Prometheus histogram from the remote implementation +// of float histogram. If the underlying implementation is an integer histogram, a +// conversion is performed. +func (h Histogram) ToFloatHistogram() *histogram.FloatHistogram { + if h.IsFloatHistogram() { + return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: h.GetZeroCountFloat(), + Count: h.GetCountFloat(), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: h.GetPositiveCounts(), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: h.GetNegativeCounts(), + } + } + // Conversion from integer histogram. 
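+	// The integer bucket deltas are accumulated into the absolute float
+	// counts expected by FloatHistogram (see deltasToCounts below).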
+ return &histogram.FloatHistogram{ + CounterResetHint: histogram.CounterResetHint(h.ResetHint), + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: float64(h.GetZeroCountInt()), + Count: float64(h.GetCountInt()), + Sum: h.Sum, + PositiveSpans: spansProtoToSpans(h.GetPositiveSpans()), + PositiveBuckets: deltasToCounts(h.GetPositiveDeltas()), + NegativeSpans: spansProtoToSpans(h.GetNegativeSpans()), + NegativeBuckets: deltasToCounts(h.GetNegativeDeltas()), + } +} + +func spansProtoToSpans(s []BucketSpan) []histogram.Span { + spans := make([]histogram.Span, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +func deltasToCounts(deltas []int64) []float64 { + counts := make([]float64, len(deltas)) + var cur float64 + for i, d := range deltas { + cur += float64(d) + counts[i] = cur + } + return counts +} + +// FromIntHistogram returns remote Histogram from the integer Histogram. +func FromIntHistogram(timestamp int64, h *histogram.Histogram) Histogram { + return Histogram{ + Count: &Histogram_CountInt{CountInt: h.Count}, + Sum: h.Sum, + Schema: h.Schema, + ZeroThreshold: h.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountInt{ZeroCountInt: h.ZeroCount}, + NegativeSpans: spansToSpansProto(h.NegativeSpans), + NegativeDeltas: h.NegativeBuckets, + PositiveSpans: spansToSpansProto(h.PositiveSpans), + PositiveDeltas: h.PositiveBuckets, + ResetHint: Histogram_ResetHint(h.CounterResetHint), + Timestamp: timestamp, + } +} + +// FromFloatHistogram returns remote Histogram from the float Histogram. +func FromFloatHistogram(timestamp int64, fh *histogram.FloatHistogram) Histogram { + return Histogram{ + Count: &Histogram_CountFloat{CountFloat: fh.Count}, + Sum: fh.Sum, + Schema: fh.Schema, + ZeroThreshold: fh.ZeroThreshold, + ZeroCount: &Histogram_ZeroCountFloat{ZeroCountFloat: fh.ZeroCount}, + NegativeSpans: spansToSpansProto(fh.NegativeSpans), + NegativeCounts: fh.NegativeBuckets, + PositiveSpans: spansToSpansProto(fh.PositiveSpans), + PositiveCounts: fh.PositiveBuckets, + ResetHint: Histogram_ResetHint(fh.CounterResetHint), + Timestamp: timestamp, + } +} + +func spansToSpansProto(s []histogram.Span) []BucketSpan { + spans := make([]BucketSpan, len(s)) + for i := 0; i < len(s); i++ { + spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length} + } + + return spans +} + +// ToExemplar converts remote exemplar to model exemplar. 
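+// A zero timestamp is interpreted as "no timestamp": HasTs is false in that case.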
+func (m Exemplar) ToExemplar(b *labels.ScratchBuilder, _ []string) exemplar.Exemplar { + timestamp := m.Timestamp + + return exemplar.Exemplar{ + Labels: labelProtosToLabels(b, m.GetLabels()), + Value: m.Value, + Ts: timestamp, + HasTs: timestamp != 0, + } +} diff --git a/vendor/github.com/prometheus/prometheus/prompb/custom.go b/vendor/github.com/prometheus/prometheus/prompb/custom.go index 13d6e0f0..f73ddd44 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/custom.go +++ b/vendor/github.com/prometheus/prometheus/prompb/custom.go @@ -17,14 +17,6 @@ import ( "sync" ) -func (m Sample) T() int64 { return m.Timestamp } -func (m Sample) V() float64 { return m.Value } - -func (h Histogram) IsFloatHistogram() bool { - _, ok := h.GetCount().(*Histogram_CountFloat) - return ok -} - func (r *ChunkedReadResponse) PooledMarshal(p *sync.Pool) ([]byte, error) { size := r.Size() data, ok := p.Get().(*[]byte) diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 4d4b4aad..7e19eba0 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -7,10 +7,13 @@ import ( "time" ) -type CompareType int +// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it. +type CompareType = compareResult + +type compareResult int const ( - compareLess CompareType = iota - 1 + compareLess compareResult = iota - 1 compareEqual compareGreater ) @@ -39,7 +42,7 @@ var ( bytesType = reflect.TypeOf([]byte{}) ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) { obj1Value := reflect.ValueOf(obj1) obj2Value := reflect.ValueOf(obj2) @@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) } - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + if timeObj1.Before(timeObj2) { + return compareLess, true + } + if timeObj1.Equal(timeObj2) { + return compareEqual, true + } + return compareGreater, true } case reflect.Slice: { @@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) } - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true } case reflect.Uintptr: { @@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
} // Less asserts that the first element is less than the second @@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) } -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare return true } -func containsValue(values []CompareType, value CompareType) bool { +func containsValue(values []compareResult, value compareResult) bool { for _, v := range values { if v == value { return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 3ddab109..19063416 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
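// Illustrative note (not part of this diff): "the larger type" means the
// comparison converts toward the wider of the two types, so integer
// wrap-around should no longer yield false equality, e.g.:
//
//	assert.EqualValuesf(t, int(270), int8(14), "error message %s", "formatted")
//	// previously could pass (270 truncates to 14 as int8); now fails,
//	// because 270 != 14 once both are compared in the larger type.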
// // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) } +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index a84e09bd..21629087 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) 
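	// Illustrative usage of the NotErrorAs/NotErrorAsf assertions added in this
	// diff (not from upstream testify; fs.PathError from io/fs is just an
	// example target type):
	//
	//	var pathErr *fs.PathError
	//	assert.NotErrorAsf(t, err, &pathErr, "unexpected %s", "path error")
	//
	// The assertion fails exactly when errors.As(err, &pathErr) finds a match,
	// in which case pathErr has been set to the matching error in the chain.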
} -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { @@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin return NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
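// Illustrative note (not from upstream testify): the two lists are compared
// as multisets, so duplicate counts matter, e.g.:
//
//	a.NotElementsMatchf([1, 1, 2], [1, 2, 2], "error message %s", "formatted") -> true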
+// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface return NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a0..1d2f7182 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -6,7 +6,7 @@ import ( ) // isOrdered checks that collection contains orderable elements. -func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { objKind := reflect.TypeOf(object).Kind() if objKind != reflect.Slice && objKind != reflect.Array { return false @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) 
+ return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0b7570f2..4e91332b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" + + // Wrapper around gopkg.in/yaml.v3 + "github.com/stretchr/testify/assert/yaml" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool +// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful +// for table driven tests. +type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool + // Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + return Fail(t, "Both arguments must be pointers", msgAndArgs...) 
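	// Illustrative behaviour under the new (same, ok) contract (not from
	// upstream testify):
	//
	//	x := 1
	//	assert.Same(t, x, x)   // now fails fast: both arguments must be pointers
	//	assert.Same(t, &x, &x) // passes: same pointer type, same address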
+ } + + if !same { + // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ "expected: %p %#v\n"+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) @@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} h.Helper() } - if samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + //fails when the arguments are not pointers + return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) + } + + if same { return Fail(t, fmt.Sprintf( "Expected and actual point to the same object: %p %#v", expected, expected), msgAndArgs...) @@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} return true } -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { +// samePointers checks if two generic interface objects are pointers of the same +// type pointing to the same object. It returns two values: same indicating if +// they are the same type and point to the same object, and ok indicating that +// both inputs are pointers. +func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + return false, false //not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) if firstType != secondType { - return false + return false, true // both are pointers, but of different types } // compare pointer addresses - return first == second + return first == second, true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) - } - expected = copyExportedFields(expected) actual = copyExportedFields(actual) @@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri return msg.String() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
+// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + if !isList(t, listA, msgAndArgs...) { + return Fail(t, "listA is not a list type", msgAndArgs...) + } + if !isList(t, listB, msgAndArgs...) { + return Fail(t, "listB is not a list type", msgAndArgs...) + } + + extraA, extraB := diffLists(listA, listB) + if len(extraA) == 0 && len(extraB) == 0 { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if err != nil { return Fail(t, err.Error(), msgAndArgs...) } + if math.IsNaN(actualEpsilon) { + return Fail(t, "relative error is NaN", msgAndArgs...) + } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) @@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { - var r *regexp.Regexp if rr, ok := rx.(*regexp.Regexp); ok { r = rr @@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool { r = regexp.MustCompile(fmt.Sprint(rx)) } - return (r.FindStringIndex(fmt.Sprint(str)) != nil) + switch v := str.(type) { + case []byte: + return r.Match(v) + case string: + return r.MatchString(v) + default: + return r.MatchString(fmt.Sprint(v)) + } } @@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{ MaxDepth: 10, } -type tHelper interface { +type tHelper = interface { Helper() } @@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // CollectT implements the TestingT interface and collects all errors. type CollectT struct { + // A slice of errors. Non-nil slice denotes a failure. + // If it's non-nil but len(c.errors) == 0, this is also a failure + // obtained by direct c.FailNow() call. errors []error } @@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) } -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") +// FailNow stops execution by calling runtime.Goexit. +func (c *CollectT) FailNow() { + c.fail() + runtime.Goexit() } // Deprecated: That was a method for internal usage that should not have been published. Now just panics. @@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) { panic("Copy() is deprecated") } +func (c *CollectT) fail() { + if !c.failed() { + c.errors = []error{} // Make it non-nil to mark a failure. + } +} + +func (c *CollectT) failed() bool { + return c.errors != nil +} + // EventuallyWithT asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
In contrast to Eventually, // it supplies a CollectT to the condition function, so that the condition @@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) { // assert.EventuallyWithT(t, func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } var lastFinishedTickErrs []error - ch := make(chan []error, 1) + ch := make(chan *CollectT, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time go func() { collect := new(CollectT) defer func() { - ch <- collect.errors + ch <- collect }() condition(collect) }() - case errs := <-ch: - if len(errs) == 0 { + case collect := <-ch: + if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs + lastFinishedTickErrs = collect.errors tick = ticker.C } } @@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { ), msgAndArgs...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ ), msgAndArgs...) } +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + func buildErrorChainString(err error) string { if err == nil { return "" diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go new file mode 100644 index 00000000..baa0cc7d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -0,0 +1,25 @@ +//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default +// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that calls a pluggable implementation. +// +// This implementation is selected with the testify_yaml_custom build tag. +// +// go test -tags testify_yaml_custom +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]. +// +// In your test package: +// +// import assertYaml "github.com/stretchr/testify/assert/yaml" +// +// func init() { +// assertYaml.Unmarshal = func (in []byte, out interface{}) error { +// // ... 
+//		return nil
+//	}
+// }
+package yaml
+
+var Unmarshal func(in []byte, out interface{}) error
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
new file mode 100644
index 00000000..b83c6cf6
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
@@ -0,0 +1,37 @@
+//go:build !testify_yaml_fail && !testify_yaml_custom
+// +build !testify_yaml_fail,!testify_yaml_custom
+
+// Package yaml is just an indirection to handle YAML deserialization.
+//
+// This package is just an indirection that allows the builder to override the
+// indirection with an alternative implementation of this package that uses
+// another implementation of YAML deserialization. This allows either not using
+// YAML deserialization at all, or using another implementation than
+// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]).
+//
+// Alternative implementations are selected using build tags:
+//
+// - testify_yaml_fail: [Unmarshal] always fails with an error
+// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it
+//   before calling any of [github.com/stretchr/testify/assert.YAMLEq] or
+//   [github.com/stretchr/testify/assert.YAMLEqf].
+//
+// Usage:
+//
+// go test -tags testify_yaml_fail
+//
+// You can check with "go list" which implementation is linked:
+//
+// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+//
+// [PR #1120]: https://github.com/stretchr/testify/pull/1120
+package yaml
+
+import goyaml "gopkg.in/yaml.v3"
+
+// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal].
+func Unmarshal(in []byte, out interface{}) error {
+	return goyaml.Unmarshal(in, out)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
new file mode 100644
index 00000000..e78f7dfe
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
@@ -0,0 +1,18 @@
+//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
+// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default
+
+// Package yaml is an implementation of YAML functions that always fail.
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3]:
+//
+// go test -tags testify_yaml_fail
+package yaml
+
+import "errors"
+
+var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)")
+
+func Unmarshal([]byte, interface{}) error {
+	return errNotImplemented
+}
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
index 6a66aea5..2a7cf70d 100644
--- a/vendor/golang.org/x/crypto/LICENSE
+++ b/vendor/golang.org/x/crypto/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/crypto/sha3/doc.go b/vendor/golang.org/x/crypto/sha3/doc.go index decd8cf9..bbf391fe 100644 --- a/vendor/golang.org/x/crypto/sha3/doc.go +++ b/vendor/golang.org/x/crypto/sha3/doc.go @@ -5,6 +5,10 @@ // Package sha3 implements the SHA-3 fixed-output-length hash functions and // the SHAKE variable-output-length hash functions defined by FIPS-202. // +// All types in this package also implement [encoding.BinaryMarshaler], +// [encoding.BinaryAppender] and [encoding.BinaryUnmarshaler] to marshal and +// unmarshal the internal state of the hash. +// // Both types of hash function use the "sponge" construction and the Keccak // permutation. For a detailed specification see http://keccak.noekeon.org/ // @@ -59,4 +63,4 @@ // They produce output of the same length, with the same security strengths // against all attacks. This means, in particular, that SHA3-256 only has // 128-bit collision resistance, because its output length is 32 bytes. -package sha3 // import "golang.org/x/crypto/sha3" +package sha3 diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go index 0d8043fd..31fffbe0 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -9,6 +9,7 @@ package sha3 // bytes. import ( + "crypto" "hash" ) @@ -16,53 +17,83 @@ import ( // Its generic security strength is 224 bits against preimage attacks, // and 112 bits against collision attacks. func New224() hash.Hash { - if h := new224Asm(); h != nil { - return h - } - return &state{rate: 144, outputLen: 28, dsbyte: 0x06} + return new224() } // New256 creates a new SHA3-256 hash. // Its generic security strength is 256 bits against preimage attacks, // and 128 bits against collision attacks. func New256() hash.Hash { - if h := new256Asm(); h != nil { - return h - } - return &state{rate: 136, outputLen: 32, dsbyte: 0x06} + return new256() } // New384 creates a new SHA3-384 hash. // Its generic security strength is 384 bits against preimage attacks, // and 192 bits against collision attacks. func New384() hash.Hash { - if h := new384Asm(); h != nil { - return h - } - return &state{rate: 104, outputLen: 48, dsbyte: 0x06} + return new384() } // New512 creates a new SHA3-512 hash. // Its generic security strength is 512 bits against preimage attacks, // and 256 bits against collision attacks. func New512() hash.Hash { - if h := new512Asm(); h != nil { - return h - } - return &state{rate: 72, outputLen: 64, dsbyte: 0x06} + return new512() +} + +func init() { + crypto.RegisterHash(crypto.SHA3_224, New224) + crypto.RegisterHash(crypto.SHA3_256, New256) + crypto.RegisterHash(crypto.SHA3_384, New384) + crypto.RegisterHash(crypto.SHA3_512, New512) +} + +const ( + dsbyteSHA3 = 0b00000110 + dsbyteKeccak = 0b00000001 + dsbyteShake = 0b00011111 + dsbyteCShake = 0b00000100 + + // rateK[c] is the rate in bytes for Keccak[c] where c is the capacity in + // bits. Given the sponge size is 1600 bits, the rate is 1600 - c bits. 
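	// Illustrative example (not part of this diff): SHA3-256 is Keccak[512],
	// so its rate is (1600 - 512) / 8 = 136 bytes per absorbed block, i.e.
	// rateK512 below; a larger capacity always trades away rate.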
+ rateK256 = (1600 - 256) / 8 + rateK448 = (1600 - 448) / 8 + rateK512 = (1600 - 512) / 8 + rateK768 = (1600 - 768) / 8 + rateK1024 = (1600 - 1024) / 8 +) + +func new224Generic() *state { + return &state{rate: rateK448, outputLen: 28, dsbyte: dsbyteSHA3} +} + +func new256Generic() *state { + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteSHA3} +} + +func new384Generic() *state { + return &state{rate: rateK768, outputLen: 48, dsbyte: dsbyteSHA3} +} + +func new512Generic() *state { + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteSHA3} } // NewLegacyKeccak256 creates a new Keccak-256 hash. // // Only use this function if you require compatibility with an existing cryptosystem // that uses non-standard padding. All other users should use New256 instead. -func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } +func NewLegacyKeccak256() hash.Hash { + return &state{rate: rateK512, outputLen: 32, dsbyte: dsbyteKeccak} +} // NewLegacyKeccak512 creates a new Keccak-512 hash. // // Only use this function if you require compatibility with an existing cryptosystem // that uses non-standard padding. All other users should use New512 instead. -func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} } +func NewLegacyKeccak512() hash.Hash { + return &state{rate: rateK1024, outputLen: 64, dsbyte: dsbyteKeccak} +} // Sum224 returns the SHA3-224 digest of the data. func Sum224(data []byte) (digest [28]byte) { diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go deleted file mode 100644 index fe8c8479..00000000 --- a/vendor/golang.org/x/crypto/sha3/hashes_generic.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -import ( - "hash" -) - -// new224Asm returns an assembly implementation of SHA3-224 if available, -// otherwise it returns nil. -func new224Asm() hash.Hash { return nil } - -// new256Asm returns an assembly implementation of SHA3-256 if available, -// otherwise it returns nil. -func new256Asm() hash.Hash { return nil } - -// new384Asm returns an assembly implementation of SHA3-384 if available, -// otherwise it returns nil. -func new384Asm() hash.Hash { return nil } - -// new512Asm returns an assembly implementation of SHA3-512 if available, -// otherwise it returns nil. -func new512Asm() hash.Hash { return nil } diff --git a/vendor/golang.org/x/crypto/sha3/hashes_noasm.go b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go new file mode 100644 index 00000000..9d85fb62 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_noasm.go @@ -0,0 +1,23 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
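// Illustrative note (not part of this diff): hashes_noasm.go replaces the
// deleted hashes_generic.go. Instead of probing for an assembly backend at
// run time (new224Asm() returning nil when unavailable), the build constraint
// below resolves the choice at compile time, e.g.:
//
//	func new224() *state { return new224Generic() } // this file (noasm builds)
//
// leaving a sibling s390x file free to supply new224..new512 when assembly
// is enabled.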
+ +//go:build !gc || purego || !s390x + +package sha3 + +func new224() *state { + return new224Generic() +} + +func new256() *state { + return new256Generic() +} + +func new384() *state { + return new384Generic() +} + +func new512() *state { + return new512Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 1f539388..99e2f16e 100644 --- a/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -1,390 +1,5419 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run keccakf_amd64_asm.go -out ../keccakf_amd64.s -pkg sha3. DO NOT EDIT. //go:build amd64 && !purego && gc -// This code was translated into a form compatible with 6a from the public -// domain sources at https://github.com/gvanas/KeccakCodePackage - -// Offsets in state -#define _ba (0*8) -#define _be (1*8) -#define _bi (2*8) -#define _bo (3*8) -#define _bu (4*8) -#define _ga (5*8) -#define _ge (6*8) -#define _gi (7*8) -#define _go (8*8) -#define _gu (9*8) -#define _ka (10*8) -#define _ke (11*8) -#define _ki (12*8) -#define _ko (13*8) -#define _ku (14*8) -#define _ma (15*8) -#define _me (16*8) -#define _mi (17*8) -#define _mo (18*8) -#define _mu (19*8) -#define _sa (20*8) -#define _se (21*8) -#define _si (22*8) -#define _so (23*8) -#define _su (24*8) - -// Temporary registers -#define rT1 AX - -// Round vars -#define rpState DI -#define rpStack SP - -#define rDa BX -#define rDe CX -#define rDi DX -#define rDo R8 -#define rDu R9 - -#define rBa R10 -#define rBe R11 -#define rBi R12 -#define rBo R13 -#define rBu R14 - -#define rCa SI -#define rCe BP -#define rCi rBi -#define rCo rBo -#define rCu R15 - -#define MOVQ_RBI_RCE MOVQ rBi, rCe -#define XORQ_RT1_RCA XORQ rT1, rCa -#define XORQ_RT1_RCE XORQ rT1, rCe -#define XORQ_RBA_RCU XORQ rBa, rCu -#define XORQ_RBE_RCU XORQ rBe, rCu -#define XORQ_RDU_RCU XORQ rDu, rCu -#define XORQ_RDA_RCA XORQ rDa, rCa -#define XORQ_RDE_RCE XORQ rDe, rCe - -#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \ - /* Prepare round */ \ - MOVQ rCe, rDa; \ - ROLQ $1, rDa; \ - \ - MOVQ _bi(iState), rCi; \ - XORQ _gi(iState), rDi; \ - XORQ rCu, rDa; \ - XORQ _ki(iState), rCi; \ - XORQ _mi(iState), rDi; \ - XORQ rDi, rCi; \ - \ - MOVQ rCi, rDe; \ - ROLQ $1, rDe; \ - \ - MOVQ _bo(iState), rCo; \ - XORQ _go(iState), rDo; \ - XORQ rCa, rDe; \ - XORQ _ko(iState), rCo; \ - XORQ _mo(iState), rDo; \ - XORQ rDo, rCo; \ - \ - MOVQ rCo, rDi; \ - ROLQ $1, rDi; \ - \ - MOVQ rCu, rDo; \ - XORQ rCe, rDi; \ - ROLQ $1, rDo; \ - \ - MOVQ rCa, rDu; \ - XORQ rCi, rDo; \ - ROLQ $1, rDu; \ - \ - /* Result b */ \ - MOVQ _ba(iState), rBa; \ - MOVQ _ge(iState), rBe; \ - XORQ rCo, rDu; \ - MOVQ _ki(iState), rBi; \ - MOVQ _mo(iState), rBo; \ - MOVQ _su(iState), rBu; \ - XORQ rDe, rBe; \ - ROLQ $44, rBe; \ - XORQ rDi, rBi; \ - XORQ rDa, rBa; \ - ROLQ $43, rBi; \ - \ - MOVQ rBe, rCa; \ - MOVQ rc, rT1; \ - ORQ rBi, rCa; \ - XORQ rBa, rT1; \ - XORQ rT1, rCa; \ - MOVQ rCa, _ba(oState); \ - \ - XORQ rDu, rBu; \ - ROLQ $14, rBu; \ - MOVQ rBa, rCu; \ - ANDQ rBe, rCu; \ - XORQ rBu, rCu; \ - MOVQ rCu, _bu(oState); \ - \ - XORQ rDo, rBo; \ - ROLQ $21, rBo; \ - MOVQ rBo, rT1; \ - ANDQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _bi(oState); \ - \ - NOTQ 
rBi; \ - ORQ rBa, rBu; \ - ORQ rBo, rBi; \ - XORQ rBo, rBu; \ - XORQ rBe, rBi; \ - MOVQ rBu, _bo(oState); \ - MOVQ rBi, _be(oState); \ - B_RBI_RCE; \ - \ - /* Result g */ \ - MOVQ _gu(iState), rBe; \ - XORQ rDu, rBe; \ - MOVQ _ka(iState), rBi; \ - ROLQ $20, rBe; \ - XORQ rDa, rBi; \ - ROLQ $3, rBi; \ - MOVQ _bo(iState), rBa; \ - MOVQ rBe, rT1; \ - ORQ rBi, rT1; \ - XORQ rDo, rBa; \ - MOVQ _me(iState), rBo; \ - MOVQ _si(iState), rBu; \ - ROLQ $28, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ga(oState); \ - G_RT1_RCA; \ - \ - XORQ rDe, rBo; \ - ROLQ $45, rBo; \ - MOVQ rBi, rT1; \ - ANDQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _ge(oState); \ - G_RT1_RCE; \ - \ - XORQ rDi, rBu; \ - ROLQ $61, rBu; \ - MOVQ rBu, rT1; \ - ORQ rBa, rT1; \ - XORQ rBo, rT1; \ - MOVQ rT1, _go(oState); \ - \ - ANDQ rBe, rBa; \ - XORQ rBu, rBa; \ - MOVQ rBa, _gu(oState); \ - NOTQ rBu; \ - G_RBA_RCU; \ - \ - ORQ rBu, rBo; \ - XORQ rBi, rBo; \ - MOVQ rBo, _gi(oState); \ - \ - /* Result k */ \ - MOVQ _be(iState), rBa; \ - MOVQ _gi(iState), rBe; \ - MOVQ _ko(iState), rBi; \ - MOVQ _mu(iState), rBo; \ - MOVQ _sa(iState), rBu; \ - XORQ rDi, rBe; \ - ROLQ $6, rBe; \ - XORQ rDo, rBi; \ - ROLQ $25, rBi; \ - MOVQ rBe, rT1; \ - ORQ rBi, rT1; \ - XORQ rDe, rBa; \ - ROLQ $1, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ka(oState); \ - K_RT1_RCA; \ - \ - XORQ rDu, rBo; \ - ROLQ $8, rBo; \ - MOVQ rBi, rT1; \ - ANDQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _ke(oState); \ - K_RT1_RCE; \ - \ - XORQ rDa, rBu; \ - ROLQ $18, rBu; \ - NOTQ rBo; \ - MOVQ rBo, rT1; \ - ANDQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _ki(oState); \ - \ - MOVQ rBu, rT1; \ - ORQ rBa, rT1; \ - XORQ rBo, rT1; \ - MOVQ rT1, _ko(oState); \ - \ - ANDQ rBe, rBa; \ - XORQ rBu, rBa; \ - MOVQ rBa, _ku(oState); \ - K_RBA_RCU; \ - \ - /* Result m */ \ - MOVQ _ga(iState), rBe; \ - XORQ rDa, rBe; \ - MOVQ _ke(iState), rBi; \ - ROLQ $36, rBe; \ - XORQ rDe, rBi; \ - MOVQ _bu(iState), rBa; \ - ROLQ $10, rBi; \ - MOVQ rBe, rT1; \ - MOVQ _mi(iState), rBo; \ - ANDQ rBi, rT1; \ - XORQ rDu, rBa; \ - MOVQ _so(iState), rBu; \ - ROLQ $27, rBa; \ - XORQ rBa, rT1; \ - MOVQ rT1, _ma(oState); \ - M_RT1_RCA; \ - \ - XORQ rDi, rBo; \ - ROLQ $15, rBo; \ - MOVQ rBi, rT1; \ - ORQ rBo, rT1; \ - XORQ rBe, rT1; \ - MOVQ rT1, _me(oState); \ - M_RT1_RCE; \ - \ - XORQ rDo, rBu; \ - ROLQ $56, rBu; \ - NOTQ rBo; \ - MOVQ rBo, rT1; \ - ORQ rBu, rT1; \ - XORQ rBi, rT1; \ - MOVQ rT1, _mi(oState); \ - \ - ORQ rBa, rBe; \ - XORQ rBu, rBe; \ - MOVQ rBe, _mu(oState); \ - \ - ANDQ rBa, rBu; \ - XORQ rBo, rBu; \ - MOVQ rBu, _mo(oState); \ - M_RBE_RCU; \ - \ - /* Result s */ \ - MOVQ _bi(iState), rBa; \ - MOVQ _go(iState), rBe; \ - MOVQ _ku(iState), rBi; \ - XORQ rDi, rBa; \ - MOVQ _ma(iState), rBo; \ - ROLQ $62, rBa; \ - XORQ rDo, rBe; \ - MOVQ _se(iState), rBu; \ - ROLQ $55, rBe; \ - \ - XORQ rDu, rBi; \ - MOVQ rBa, rDu; \ - XORQ rDe, rBu; \ - ROLQ $2, rBu; \ - ANDQ rBe, rDu; \ - XORQ rBu, rDu; \ - MOVQ rDu, _su(oState); \ - \ - ROLQ $39, rBi; \ - S_RDU_RCU; \ - NOTQ rBe; \ - XORQ rDa, rBo; \ - MOVQ rBe, rDa; \ - ANDQ rBi, rDa; \ - XORQ rBa, rDa; \ - MOVQ rDa, _sa(oState); \ - S_RDA_RCA; \ - \ - ROLQ $41, rBo; \ - MOVQ rBi, rDe; \ - ORQ rBo, rDe; \ - XORQ rBe, rDe; \ - MOVQ rDe, _se(oState); \ - S_RDE_RCE; \ - \ - MOVQ rBo, rDi; \ - MOVQ rBu, rDo; \ - ANDQ rBu, rDi; \ - ORQ rBa, rDo; \ - XORQ rBi, rDi; \ - XORQ rBo, rDo; \ - MOVQ rDi, _si(oState); \ - MOVQ rDo, _so(oState) \ - // func keccakF1600(a *[25]uint64) -TEXT ·keccakF1600(SB), 0, $200-8 - MOVQ a+0(FP), rpState +TEXT ·keccakF1600(SB), $200-8 + MOVQ a+0(FP), 
DI // Convert the user state into an internal state - NOTQ _be(rpState) - NOTQ _bi(rpState) - NOTQ _go(rpState) - NOTQ _ki(rpState) - NOTQ _mi(rpState) - NOTQ _sa(rpState) + NOTQ 8(DI) + NOTQ 16(DI) + NOTQ 64(DI) + NOTQ 96(DI) + NOTQ 136(DI) + NOTQ 160(DI) // Execute the KeccakF permutation - MOVQ _ba(rpState), rCa - MOVQ _be(rpState), rCe - MOVQ _bu(rpState), rCu - - XORQ _ga(rpState), rCa - XORQ _ge(rpState), rCe - XORQ _gu(rpState), rCu - - XORQ _ka(rpState), rCa - XORQ _ke(rpState), rCe - XORQ _ku(rpState), rCu - - XORQ _ma(rpState), rCa - XORQ _me(rpState), rCe - XORQ _mu(rpState), rCu - - XORQ _sa(rpState), rCa - XORQ _se(rpState), rCe - MOVQ _si(rpState), rDi - MOVQ _so(rpState), rDo - XORQ _su(rpState), rCu - - mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, 
XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE) - mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP) + MOVQ (DI), SI + MOVQ 8(DI), BP + MOVQ 32(DI), R15 + XORQ 40(DI), SI + XORQ 48(DI), BP + XORQ 72(DI), R15 + XORQ 80(DI), SI + XORQ 88(DI), BP + XORQ 112(DI), R15 + XORQ 120(DI), SI + XORQ 128(DI), BP + XORQ 152(DI), R15 + XORQ 160(DI), SI + XORQ 168(DI), BP + MOVQ 176(DI), DX + MOVQ 184(DI), R8 + XORQ 192(DI), R15 - // Revert the internal state to the user state - NOTQ _be(rpState) - NOTQ _bi(rpState) - NOTQ _go(rpState) - NOTQ _ki(rpState) - NOTQ _mi(rpState) - NOTQ _sa(rpState) + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 
+ XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000000000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + 
[vendored assembly elided — this diff hunk was flattened during extraction, with hundreds of `+` lines fused together. The span is the unrolled middle rounds (iota constants 0x0000000000008082 through 0x800000008000000a, i.e. rounds 2-20 of 24) of the Keccak-f[1600] permutation from an amd64 assembly file pulled in by the go.mod dependency bump, most likely keccakf_amd64.s from golang.org/x/crypto/sha3. Every unrolled round has the same shape: a theta parity computation ("Prepare round") followed by the rho/pi/chi/iota results for the five lane rows ("Result b" through "Result s"), with consecutive rounds ping-ponging the 25-lane state between the stack (SP) and the state pointer (DI). Only the 64-bit round constant loaded into AX changes from round to round. A portable Go sketch of the permutation follows.]
ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008081, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + 
XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000000008080, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + MOVQ R12, BP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + XORQ R10, R15 + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + XORQ R11, R15 + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, 
R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(DI), R12 + XORQ 56(DI), DX + XORQ R15, BX + XORQ 96(DI), R12 + XORQ 136(DI), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(DI), R13 + XORQ 64(DI), R8 + XORQ SI, CX + XORQ 104(DI), R13 + XORQ 144(DI), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (DI), R10 + MOVQ 48(DI), R11 + XORQ R13, R9 + MOVQ 96(DI), R12 + MOVQ 144(DI), R13 + MOVQ 192(DI), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x0000000080000001, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (SP) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(SP) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(SP) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(SP) + MOVQ R12, 8(SP) + MOVQ R12, BP + + // Result g + MOVQ 72(DI), R11 + XORQ R9, R11 + MOVQ 80(DI), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(DI), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(DI), R13 + MOVQ 176(DI), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(SP) + XORQ AX, SI + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(SP) + XORQ AX, BP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(SP) + NOTQ R14 + XORQ R10, R15 + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(SP) + + // Result k + MOVQ 8(DI), R10 + MOVQ 56(DI), R11 + MOVQ 104(DI), R12 + MOVQ 152(DI), R13 + MOVQ 160(DI), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(SP) + XORQ AX, SI + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(SP) + XORQ AX, BP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(SP) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(SP) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(SP) + XORQ R10, R15 + + // Result m + MOVQ 40(DI), R11 + XORQ BX, R11 + MOVQ 88(DI), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(DI), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(DI), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(DI), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(SP) + XORQ AX, SI + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(SP) + XORQ AX, BP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(SP) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(SP) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(SP) + XORQ R11, R15 + + // Result s + MOVQ 16(DI), R10 + MOVQ 64(DI), R11 + MOVQ 112(DI), R12 + XORQ DX, R10 + MOVQ 120(DI), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(DI), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + 
MOVQ R9, 192(SP) + ROLQ $0x27, R12 + XORQ R9, R15 + NOTQ R11 + XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(SP) + XORQ BX, SI + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(SP) + XORQ CX, BP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(SP) + MOVQ R8, 184(SP) + + // Prepare round + MOVQ BP, BX + ROLQ $0x01, BX + MOVQ 16(SP), R12 + XORQ 56(SP), DX + XORQ R15, BX + XORQ 96(SP), R12 + XORQ 136(SP), DX + XORQ DX, R12 + MOVQ R12, CX + ROLQ $0x01, CX + MOVQ 24(SP), R13 + XORQ 64(SP), R8 + XORQ SI, CX + XORQ 104(SP), R13 + XORQ 144(SP), R8 + XORQ R8, R13 + MOVQ R13, DX + ROLQ $0x01, DX + MOVQ R15, R8 + XORQ BP, DX + ROLQ $0x01, R8 + MOVQ SI, R9 + XORQ R12, R8 + ROLQ $0x01, R9 + + // Result b + MOVQ (SP), R10 + MOVQ 48(SP), R11 + XORQ R13, R9 + MOVQ 96(SP), R12 + MOVQ 144(SP), R13 + MOVQ 192(SP), R14 + XORQ CX, R11 + ROLQ $0x2c, R11 + XORQ DX, R12 + XORQ BX, R10 + ROLQ $0x2b, R12 + MOVQ R11, SI + MOVQ $0x8000000080008008, AX + ORQ R12, SI + XORQ R10, AX + XORQ AX, SI + MOVQ SI, (DI) + XORQ R9, R14 + ROLQ $0x0e, R14 + MOVQ R10, R15 + ANDQ R11, R15 + XORQ R14, R15 + MOVQ R15, 32(DI) + XORQ R8, R13 + ROLQ $0x15, R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 16(DI) + NOTQ R12 + ORQ R10, R14 + ORQ R13, R12 + XORQ R13, R14 + XORQ R11, R12 + MOVQ R14, 24(DI) + MOVQ R12, 8(DI) + NOP + + // Result g + MOVQ 72(SP), R11 + XORQ R9, R11 + MOVQ 80(SP), R12 + ROLQ $0x14, R11 + XORQ BX, R12 + ROLQ $0x03, R12 + MOVQ 24(SP), R10 + MOVQ R11, AX + ORQ R12, AX + XORQ R8, R10 + MOVQ 128(SP), R13 + MOVQ 176(SP), R14 + ROLQ $0x1c, R10 + XORQ R10, AX + MOVQ AX, 40(DI) + NOP + XORQ CX, R13 + ROLQ $0x2d, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 48(DI) + NOP + XORQ DX, R14 + ROLQ $0x3d, R14 + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 64(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 72(DI) + NOTQ R14 + NOP + ORQ R14, R13 + XORQ R12, R13 + MOVQ R13, 56(DI) + + // Result k + MOVQ 8(SP), R10 + MOVQ 56(SP), R11 + MOVQ 104(SP), R12 + MOVQ 152(SP), R13 + MOVQ 160(SP), R14 + XORQ DX, R11 + ROLQ $0x06, R11 + XORQ R8, R12 + ROLQ $0x19, R12 + MOVQ R11, AX + ORQ R12, AX + XORQ CX, R10 + ROLQ $0x01, R10 + XORQ R10, AX + MOVQ AX, 80(DI) + NOP + XORQ R9, R13 + ROLQ $0x08, R13 + MOVQ R12, AX + ANDQ R13, AX + XORQ R11, AX + MOVQ AX, 88(DI) + NOP + XORQ BX, R14 + ROLQ $0x12, R14 + NOTQ R13 + MOVQ R13, AX + ANDQ R14, AX + XORQ R12, AX + MOVQ AX, 96(DI) + MOVQ R14, AX + ORQ R10, AX + XORQ R13, AX + MOVQ AX, 104(DI) + ANDQ R11, R10 + XORQ R14, R10 + MOVQ R10, 112(DI) + NOP + + // Result m + MOVQ 40(SP), R11 + XORQ BX, R11 + MOVQ 88(SP), R12 + ROLQ $0x24, R11 + XORQ CX, R12 + MOVQ 32(SP), R10 + ROLQ $0x0a, R12 + MOVQ R11, AX + MOVQ 136(SP), R13 + ANDQ R12, AX + XORQ R9, R10 + MOVQ 184(SP), R14 + ROLQ $0x1b, R10 + XORQ R10, AX + MOVQ AX, 120(DI) + NOP + XORQ DX, R13 + ROLQ $0x0f, R13 + MOVQ R12, AX + ORQ R13, AX + XORQ R11, AX + MOVQ AX, 128(DI) + NOP + XORQ R8, R14 + ROLQ $0x38, R14 + NOTQ R13 + MOVQ R13, AX + ORQ R14, AX + XORQ R12, AX + MOVQ AX, 136(DI) + ORQ R10, R11 + XORQ R14, R11 + MOVQ R11, 152(DI) + ANDQ R10, R14 + XORQ R13, R14 + MOVQ R14, 144(DI) + NOP + + // Result s + MOVQ 16(SP), R10 + MOVQ 64(SP), R11 + MOVQ 112(SP), R12 + XORQ DX, R10 + MOVQ 120(SP), R13 + ROLQ $0x3e, R10 + XORQ R8, R11 + MOVQ 168(SP), R14 + ROLQ $0x37, R11 + XORQ R9, R12 + MOVQ R10, R9 + XORQ CX, R14 + ROLQ $0x02, R14 + ANDQ R11, R9 + XORQ R14, R9 + MOVQ R9, 192(DI) + ROLQ $0x27, R12 + NOP + NOTQ R11 + 
XORQ BX, R13 + MOVQ R11, BX + ANDQ R12, BX + XORQ R10, BX + MOVQ BX, 160(DI) + NOP + ROLQ $0x29, R13 + MOVQ R12, CX + ORQ R13, CX + XORQ R11, CX + MOVQ CX, 168(DI) + NOP + MOVQ R13, DX + MOVQ R14, R8 + ANDQ R14, DX + ORQ R10, R8 + XORQ R12, DX + XORQ R13, R8 + MOVQ DX, 176(DI) + MOVQ R8, 184(DI) + + // Revert the internal state to the user state + NOTQ 8(DI) + NOTQ 16(DI) + NOTQ 64(DI) + NOTQ 96(DI) + NOTQ 136(DI) + NOTQ 160(DI) RET diff --git a/vendor/golang.org/x/crypto/sha3/register.go b/vendor/golang.org/x/crypto/sha3/register.go deleted file mode 100644 index addfd504..00000000 --- a/vendor/golang.org/x/crypto/sha3/register.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.4 - -package sha3 - -import ( - "crypto" -) - -func init() { - crypto.RegisterHash(crypto.SHA3_224, New224) - crypto.RegisterHash(crypto.SHA3_256, New256) - crypto.RegisterHash(crypto.SHA3_384, New384) - crypto.RegisterHash(crypto.SHA3_512, New512) -} diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go index 4884d172..6658c444 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -4,6 +4,15 @@ package sha3 +import ( + "crypto/subtle" + "encoding/binary" + "errors" + "unsafe" + + "golang.org/x/sys/cpu" +) + // spongeDirection indicates the direction bytes are flowing through the sponge. type spongeDirection int @@ -14,17 +23,13 @@ const ( spongeSqueezing ) -const ( - // maxRate is the maximum size of the internal buffer. SHAKE-256 - // currently needs the largest buffer. - maxRate = 168 -) - type state struct { - // Generic sponge components. - a [25]uint64 // main state of the hash - buf []byte // points into storage - rate int // the number of bytes of state to use + a [1600 / 8]byte // main state of the hash + + // a[n:rate] is the buffer. If absorbing, it's the remaining space to XOR + // into before running the permutation. If squeezing, it's the remaining + // output to produce before running the permutation. + n, rate int // dsbyte contains the "domain separation" bits and the first bit of // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the @@ -40,9 +45,6 @@ type state struct { // Extendable-Output Functions (May 2014)" dsbyte byte - storage storageBuf - - // Specific to SHA-3 and SHAKE. outputLen int // the default output size in bytes state spongeDirection // whether the sponge is absorbing or squeezing } @@ -54,103 +56,77 @@ func (d *state) BlockSize() int { return d.rate } func (d *state) Size() int { return d.outputLen } // Reset clears the internal state by zeroing the sponge state and -// the byte buffer, and setting Sponge.state to absorbing. +// the buffer indexes, and setting Sponge.state to absorbing. func (d *state) Reset() { // Zero the permutation's state. for i := range d.a { d.a[i] = 0 } d.state = spongeAbsorbing - d.buf = d.storage.asBytes()[:0] + d.n = 0 } func (d *state) clone() *state { ret := *d - if ret.state == spongeAbsorbing { - ret.buf = ret.storage.asBytes()[:len(ret.buf)] - } else { - ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] - } - return &ret } -// permute applies the KeccakF-1600 permutation. It handles -// any input-output buffering. +// permute applies the KeccakF-1600 permutation. 
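The 64-bit immediates loaded into AX at the top of each unrolled round above, $0x8000000000000080 through $0x8000000080008008, are the last seven of the 24 Keccak-f[1600] round constants (RC[17] through RC[23]); note how the final round also replaces the running theta-parity XORs into SI, BP and R15 with NOPs, since no further round consumes them. The constants come from a degree-8 LFSR rather than being arbitrary; a minimal sketch deriving them (hypothetical helper, not part of the vendored code):

	package main

	import "fmt"

	// keccakRoundConstants derives the 24 Keccak-f[1600] round constants from
	// the LFSR with polynomial x^8 + x^6 + x^5 + x^4 + 1 (FIPS 202, sec. 3.2.5).
	// Bit (1<<j)-1 of constant i is the LFSR output rc(7*i + j).
	func keccakRoundConstants() [24]uint64 {
		var rcs [24]uint64
		lfsr := byte(1)
		for i := range rcs {
			for j := 0; j < 7; j++ {
				if lfsr&1 != 0 {
					rcs[i] |= 1 << ((1 << j) - 1) // bits 0, 1, 3, 7, 15, 31, 63
				}
				if lfsr&0x80 != 0 {
					lfsr = lfsr<<1 ^ 0x71 // reduce by the polynomial
				} else {
					lfsr <<= 1
				}
			}
		}
		return rcs
	}

	func main() {
		rcs := keccakRoundConstants()
		fmt.Printf("%#x\n", rcs[17]) // 0x8000000000000080, as in the round above
		fmt.Printf("%#x\n", rcs[23]) // 0x8000000080008008, the final round
	}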
func (d *state) permute() { - switch d.state { - case spongeAbsorbing: - // If we're absorbing, we need to xor the input into the state - // before applying the permutation. - xorIn(d, d.buf) - d.buf = d.storage.asBytes()[:0] - keccakF1600(&d.a) - case spongeSqueezing: - // If we're squeezing, we need to apply the permutation before - // copying more output. - keccakF1600(&d.a) - d.buf = d.storage.asBytes()[:d.rate] - copyOut(d, d.buf) + var a *[25]uint64 + if cpu.IsBigEndian { + a = new([25]uint64) + for i := range a { + a[i] = binary.LittleEndian.Uint64(d.a[i*8:]) + } + } else { + a = (*[25]uint64)(unsafe.Pointer(&d.a)) + } + + keccakF1600(a) + d.n = 0 + + if cpu.IsBigEndian { + for i := range a { + binary.LittleEndian.PutUint64(d.a[i*8:], a[i]) + } } } // pads appends the domain separation bits in dsbyte, applies // the multi-bitrate 10..1 padding rule, and permutes the state. -func (d *state) padAndPermute(dsbyte byte) { - if d.buf == nil { - d.buf = d.storage.asBytes()[:0] - } +func (d *state) padAndPermute() { // Pad with this instance's domain-separator bits. We know that there's - // at least one byte of space in d.buf because, if it were full, + // at least one byte of space in the sponge because, if it were full, // permute would have been called to empty it. dsbyte also contains the // first one bit for the padding. See the comment in the state struct. - d.buf = append(d.buf, dsbyte) - zerosStart := len(d.buf) - d.buf = d.storage.asBytes()[:d.rate] - for i := zerosStart; i < d.rate; i++ { - d.buf[i] = 0 - } + d.a[d.n] ^= d.dsbyte // This adds the final one bit for the padding. Because of the way that // bits are numbered from the LSB upwards, the final bit is the MSB of // the last byte. - d.buf[d.rate-1] ^= 0x80 + d.a[d.rate-1] ^= 0x80 // Apply the permutation d.permute() d.state = spongeSqueezing - d.buf = d.storage.asBytes()[:d.rate] - copyOut(d, d.buf) } // Write absorbs more data into the hash's state. It panics if any // output has already been read. -func (d *state) Write(p []byte) (written int, err error) { +func (d *state) Write(p []byte) (n int, err error) { if d.state != spongeAbsorbing { panic("sha3: Write after Read") } - if d.buf == nil { - d.buf = d.storage.asBytes()[:0] - } - written = len(p) + + n = len(p) for len(p) > 0 { - if len(d.buf) == 0 && len(p) >= d.rate { - // The fast path; absorb a full "rate" bytes of input and apply the permutation. - xorIn(d, p[:d.rate]) - p = p[d.rate:] - keccakF1600(&d.a) - } else { - // The slow path; buffer the input until we can fill the sponge, and then xor it in. - todo := d.rate - len(d.buf) - if todo > len(p) { - todo = len(p) - } - d.buf = append(d.buf, p[:todo]...) - p = p[todo:] - - // If the sponge is full, apply the permutation. - if len(d.buf) == d.rate { - d.permute() - } + x := subtle.XORBytes(d.a[d.n:d.rate], d.a[d.n:d.rate], p) + d.n += x + p = p[x:] + + // If the sponge is full, apply the permutation. + if d.n == d.rate { + d.permute() } } @@ -161,21 +137,21 @@ func (d *state) Write(p []byte) (written int, err error) { func (d *state) Read(out []byte) (n int, err error) { // If we're still absorbing, pad and apply the permutation. if d.state == spongeAbsorbing { - d.padAndPermute(d.dsbyte) + d.padAndPermute() } n = len(out) // Now, do the squeezing. for len(out) > 0 { - n := copy(out, d.buf) - d.buf = d.buf[n:] - out = out[n:] - // Apply the permutation if we've squeezed the sponge dry. 
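The rewritten Write above drops the old input buffer entirely: d.n records how much of the current rate-sized block has been absorbed, and subtle.XORBytes folds input straight into the state bytes, running the permutation whenever the block fills. A standalone sketch of that loop, with a hypothetical sponge type and the permutation stubbed out:

	package main

	import (
		"crypto/subtle"
		"fmt"
	)

	// sponge mirrors the new state layout: a flat byte array plus a fill
	// index n and the rate in bytes. The real permutation is elided.
	type sponge struct {
		a    [200]byte
		n    int
		rate int
	}

	func (s *sponge) absorb(p []byte) {
		for len(p) > 0 {
			// XOR input directly into the unfilled part of the block;
			// XORBytes returns how many bytes it consumed.
			x := subtle.XORBytes(s.a[s.n:s.rate], s.a[s.n:s.rate], p)
			s.n += x
			p = p[x:]
			if s.n == s.rate {
				s.n = 0 // keccakF1600(&s.a) would run here
			}
		}
	}

	func main() {
		s := &sponge{rate: 168} // SHAKE128 / rateK256
		s.absorb(make([]byte, 400))
		fmt.Println(s.n) // 64, i.e. 400 mod 168
	}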
- if len(d.buf) == 0 { + if d.n == d.rate { d.permute() } + + x := copy(out, d.a[d.n:d.rate]) + d.n += x + out = out[x:] } return @@ -195,3 +171,74 @@ func (d *state) Sum(in []byte) []byte { dup.Read(hash) return append(in, hash...) } + +const ( + magicSHA3 = "sha\x08" + magicShake = "sha\x09" + magicCShake = "sha\x0a" + magicKeccak = "sha\x0b" + // magic || rate || main state || n || sponge direction + marshaledSize = len(magicSHA3) + 1 + 200 + 1 + 1 +) + +func (d *state) MarshalBinary() ([]byte, error) { + return d.AppendBinary(make([]byte, 0, marshaledSize)) +} + +func (d *state) AppendBinary(b []byte) ([]byte, error) { + switch d.dsbyte { + case dsbyteSHA3: + b = append(b, magicSHA3...) + case dsbyteShake: + b = append(b, magicShake...) + case dsbyteCShake: + b = append(b, magicCShake...) + case dsbyteKeccak: + b = append(b, magicKeccak...) + default: + panic("unknown dsbyte") + } + // rate is at most 168, and n is at most rate. + b = append(b, byte(d.rate)) + b = append(b, d.a[:]...) + b = append(b, byte(d.n), byte(d.state)) + return b, nil +} + +func (d *state) UnmarshalBinary(b []byte) error { + if len(b) != marshaledSize { + return errors.New("sha3: invalid hash state") + } + + magic := string(b[:len(magicSHA3)]) + b = b[len(magicSHA3):] + switch { + case magic == magicSHA3 && d.dsbyte == dsbyteSHA3: + case magic == magicShake && d.dsbyte == dsbyteShake: + case magic == magicCShake && d.dsbyte == dsbyteCShake: + case magic == magicKeccak && d.dsbyte == dsbyteKeccak: + default: + return errors.New("sha3: invalid hash state identifier") + } + + rate := int(b[0]) + b = b[1:] + if rate != d.rate { + return errors.New("sha3: invalid hash state function") + } + + copy(d.a[:], b) + b = b[len(d.a):] + + n, state := int(b[0]), spongeDirection(b[1]) + if n > d.rate { + return errors.New("sha3: invalid hash state") + } + d.n = n + if state != spongeAbsorbing && state != spongeSqueezing { + return errors.New("sha3: invalid hash state") + } + d.state = state + + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index b4fbbf86..00d8034a 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -248,56 +248,56 @@ func (s *asmState) Clone() ShakeHash { return s.clone() } -// new224Asm returns an assembly implementation of SHA3-224 if available, -// otherwise it returns nil. -func new224Asm() hash.Hash { +// new224 returns an assembly implementation of SHA3-224 if available, +// otherwise it returns a generic implementation. +func new224() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_224) } - return nil + return new224Generic() } -// new256Asm returns an assembly implementation of SHA3-256 if available, -// otherwise it returns nil. -func new256Asm() hash.Hash { +// new256 returns an assembly implementation of SHA3-256 if available, +// otherwise it returns a generic implementation. +func new256() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_256) } - return nil + return new256Generic() } -// new384Asm returns an assembly implementation of SHA3-384 if available, -// otherwise it returns nil. -func new384Asm() hash.Hash { +// new384 returns an assembly implementation of SHA3-384 if available, +// otherwise it returns a generic implementation. 
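The MarshalBinary/AppendBinary/UnmarshalBinary methods above make the sponge checkpointable: the serialized form is a 4-byte magic encoding the dsbyte, the rate, the 200-byte state, the fill index n and the sponge direction. A usage sketch, assuming this vendored golang.org/x/crypto/sha3, where the generic digest type now satisfies encoding.BinaryMarshaler:

	package main

	import (
		"encoding"
		"fmt"

		"golang.org/x/crypto/sha3"
	)

	func main() {
		h1 := sha3.New256()
		h1.Write([]byte("hello "))

		// Snapshot the sponge mid-stream (works where the generic
		// implementation is selected, e.g. on amd64).
		snap, err := h1.(encoding.BinaryMarshaler).MarshalBinary()
		if err != nil {
			panic(err)
		}

		// Restore into a fresh hash and finish the input there.
		h2 := sha3.New256()
		if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(snap); err != nil {
			panic(err)
		}

		h1.Write([]byte("world"))
		h2.Write([]byte("world"))
		fmt.Printf("%x\n%x\n", h1.Sum(nil), h2.Sum(nil)) // identical digests
	}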
+func new384() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_384) } - return nil + return new384Generic() } -// new512Asm returns an assembly implementation of SHA3-512 if available, -// otherwise it returns nil. -func new512Asm() hash.Hash { +// new512 returns an assembly implementation of SHA3-512 if available, +// otherwise it returns a generic implementation. +func new512() hash.Hash { if cpu.S390X.HasSHA3 { return newAsmState(sha3_512) } - return nil + return new512Generic() } -// newShake128Asm returns an assembly implementation of SHAKE-128 if available, -// otherwise it returns nil. -func newShake128Asm() ShakeHash { +// newShake128 returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns a generic implementation. +func newShake128() ShakeHash { if cpu.S390X.HasSHA3 { return newAsmState(shake_128) } - return nil + return newShake128Generic() } -// newShake256Asm returns an assembly implementation of SHAKE-256 if available, -// otherwise it returns nil. -func newShake256Asm() ShakeHash { +// newShake256 returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns a generic implementation. +func newShake256() ShakeHash { if cpu.S390X.HasSHA3 { return newAsmState(shake_256) } - return nil + return newShake256Generic() } diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index bb699840..a6b3a428 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -16,9 +16,12 @@ package sha3 // [2] https://doi.org/10.6028/NIST.SP.800-185 import ( + "bytes" "encoding/binary" + "errors" "hash" "io" + "math/bits" ) // ShakeHash defines the interface to hash functions that support @@ -50,44 +53,36 @@ type cshakeState struct { initBlock []byte } -// Consts for configuring initial SHA-3 state -const ( - dsbyteShake = 0x1f - dsbyteCShake = 0x04 - rate128 = 168 - rate256 = 136 -) +func bytepad(data []byte, rate int) []byte { + out := make([]byte, 0, 9+len(data)+rate-1) + out = append(out, leftEncode(uint64(rate))...) + out = append(out, data...) + if padlen := rate - len(out)%rate; padlen < rate { + out = append(out, make([]byte, padlen)...) + } + return out +} -func bytepad(input []byte, w int) []byte { - // leftEncode always returns max 9 bytes - buf := make([]byte, 0, 9+len(input)+w) - buf = append(buf, leftEncode(uint64(w))...) - buf = append(buf, input...) - padlen := w - (len(buf) % w) - return append(buf, make([]byte, padlen)...) -} - -func leftEncode(value uint64) []byte { - var b [9]byte - binary.BigEndian.PutUint64(b[1:], value) - // Trim all but last leading zero bytes - i := byte(1) - for i < 8 && b[i] == 0 { - i++ +func leftEncode(x uint64) []byte { + // Let n be the smallest positive integer for which 2^(8n) > x. + n := (bits.Len64(x) + 7) / 8 + if n == 0 { + n = 1 } - // Prepend number of encoded bytes - b[i-1] = 9 - i - return b[i-1:] + // Return n || x with n as a byte and x an n bytes in big-endian order. + b := make([]byte, 9) + binary.BigEndian.PutUint64(b[1:], x) + b = b[9-n-1:] + b[0] = byte(n) + return b } func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}} - - // leftEncode returns max 9 bytes - c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) 
+ c.initBlock = make([]byte, 0, 9+len(N)+9+len(S)) // leftEncode returns max 9 bytes + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) c.initBlock = append(c.initBlock, N...) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) c.initBlock = append(c.initBlock, S...) c.Write(bytepad(c.initBlock, c.rate)) return &c @@ -111,24 +106,50 @@ func (c *state) Clone() ShakeHash { return c.clone() } +func (c *cshakeState) MarshalBinary() ([]byte, error) { + return c.AppendBinary(make([]byte, 0, marshaledSize+len(c.initBlock))) +} + +func (c *cshakeState) AppendBinary(b []byte) ([]byte, error) { + b, err := c.state.AppendBinary(b) + if err != nil { + return nil, err + } + b = append(b, c.initBlock...) + return b, nil +} + +func (c *cshakeState) UnmarshalBinary(b []byte) error { + if len(b) <= marshaledSize { + return errors.New("sha3: invalid hash state") + } + if err := c.state.UnmarshalBinary(b[:marshaledSize]); err != nil { + return err + } + c.initBlock = bytes.Clone(b[marshaledSize:]) + return nil +} + // NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. // Its generic security strength is 128 bits against all attacks if at // least 32 bytes of its output are used. func NewShake128() ShakeHash { - if h := newShake128Asm(); h != nil { - return h - } - return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake} + return newShake128() } // NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. // Its generic security strength is 256 bits against all attacks if // at least 64 bytes of its output are used. func NewShake256() ShakeHash { - if h := newShake256Asm(); h != nil { - return h - } - return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake} + return newShake256() +} + +func newShake128Generic() *state { + return &state{rate: rateK256, outputLen: 32, dsbyte: dsbyteShake} +} + +func newShake256Generic() *state { + return &state{rate: rateK512, outputLen: 64, dsbyte: dsbyteShake} } // NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash, @@ -141,7 +162,7 @@ func NewCShake128(N, S []byte) ShakeHash { if len(N) == 0 && len(S) == 0 { return NewShake128() } - return newCShake(N, S, rate128, 32, dsbyteCShake) + return newCShake(N, S, rateK256, 32, dsbyteCShake) } // NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash, @@ -154,7 +175,7 @@ func NewCShake256(N, S []byte) ShakeHash { if len(N) == 0 && len(S) == 0 { return NewShake256() } - return newCShake(N, S, rate256, 64, dsbyteCShake) + return newCShake(N, S, rateK512, 64, dsbyteCShake) } // ShakeSum128 writes an arbitrary-length digest of data into hash. diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go deleted file mode 100644 index 8d31cf5b..00000000 --- a/vendor/golang.org/x/crypto/sha3/shake_generic.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !gc || purego || !s390x - -package sha3 - -// newShake128Asm returns an assembly implementation of SHAKE-128 if available, -// otherwise it returns nil. -func newShake128Asm() ShakeHash { - return nil -} - -// newShake256Asm returns an assembly implementation of SHAKE-256 if available, -// otherwise it returns nil. 
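left_encode from NIST SP 800-185 underlies both bytepad and the name/customization prefixes assembled in newCShake above: it emits one length byte n followed by the value in n big-endian bytes, so bytepad(initBlock, 168) begins 01 a8. The rewrite computes n directly with math/bits instead of trimming leading zero bytes; a self-contained copy with sample encodings:

	package main

	import (
		"encoding/binary"
		"fmt"
		"math/bits"
	)

	// leftEncode as rewritten above: n is the smallest byte count that
	// holds x (at least 1), prepended to x in big-endian order.
	func leftEncode(x uint64) []byte {
		n := (bits.Len64(x) + 7) / 8
		if n == 0 {
			n = 1
		}
		b := make([]byte, 9)
		binary.BigEndian.PutUint64(b[1:], x)
		b = b[9-n-1:]
		b[0] = byte(n)
		return b
	}

	func main() {
		fmt.Printf("% x\n", leftEncode(0))    // 01 00
		fmt.Printf("% x\n", leftEncode(168))  // 01 a8
		fmt.Printf("% x\n", leftEncode(4096)) // 02 10 00
	}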
-func newShake256Asm() ShakeHash { - return nil -} diff --git a/vendor/golang.org/x/crypto/sha3/shake_noasm.go b/vendor/golang.org/x/crypto/sha3/shake_noasm.go new file mode 100644 index 00000000..4276ba4a --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_noasm.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc || purego || !s390x + +package sha3 + +func newShake128() *state { + return newShake128Generic() +} + +func newShake256() *state { + return newShake256Generic() +} diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go deleted file mode 100644 index 7337cca8..00000000 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (!amd64 && !386 && !ppc64le) || purego - -package sha3 - -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate]byte - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(b) -} - -var ( - xorIn = xorInGeneric - copyOut = copyOutGeneric - xorInUnaligned = xorInGeneric - copyOutUnaligned = copyOutGeneric -) - -const xorImplementationUnaligned = "generic" diff --git a/vendor/golang.org/x/crypto/sha3/xor_generic.go b/vendor/golang.org/x/crypto/sha3/xor_generic.go deleted file mode 100644 index 8d947711..00000000 --- a/vendor/golang.org/x/crypto/sha3/xor_generic.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package sha3 - -import "encoding/binary" - -// xorInGeneric xors the bytes in buf into the state; it -// makes no non-portable assumptions about memory layout -// or alignment. -func xorInGeneric(d *state, buf []byte) { - n := len(buf) / 8 - - for i := 0; i < n; i++ { - a := binary.LittleEndian.Uint64(buf) - d.a[i] ^= a - buf = buf[8:] - } -} - -// copyOutGeneric copies uint64s to a byte buffer. -func copyOutGeneric(d *state, b []byte) { - for i := 0; len(b) >= 8; i++ { - binary.LittleEndian.PutUint64(b, d.a[i]) - b = b[8:] - } -} diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go deleted file mode 100644 index 870e2d16..00000000 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (amd64 || 386 || ppc64le) && !purego - -package sha3 - -import "unsafe" - -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate / 8]uint64 - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(unsafe.Pointer(b)) -} - -// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a -// XOR buf. 
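The new shake_noasm.go above is the complement of the s390x assembly path: its //go:build !gc || purego || !s390x constraint is, together with the _s390x filename suffix on the other side, the negation of the conditions under which the assembly-backed constructors compile, so exactly one definition of newShake128 and newShake256 exists in any build, and -tags purego forces the generic path everywhere. A minimal sketch of the same pattern, with hypothetical file and function names:

	// impl_s390x.go
	//go:build gc && !purego

	package p

	func impl() string { return "asm" }

	// impl_noasm.go
	//go:build !gc || purego || !s390x

	package p

	func impl() string { return "generic" }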
-func xorInUnaligned(d *state, buf []byte) { - n := len(buf) - bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] - if n >= 72 { - d.a[0] ^= bw[0] - d.a[1] ^= bw[1] - d.a[2] ^= bw[2] - d.a[3] ^= bw[3] - d.a[4] ^= bw[4] - d.a[5] ^= bw[5] - d.a[6] ^= bw[6] - d.a[7] ^= bw[7] - d.a[8] ^= bw[8] - } - if n >= 104 { - d.a[9] ^= bw[9] - d.a[10] ^= bw[10] - d.a[11] ^= bw[11] - d.a[12] ^= bw[12] - } - if n >= 136 { - d.a[13] ^= bw[13] - d.a[14] ^= bw[14] - d.a[15] ^= bw[15] - d.a[16] ^= bw[16] - } - if n >= 144 { - d.a[17] ^= bw[17] - } - if n >= 168 { - d.a[18] ^= bw[18] - d.a[19] ^= bw[19] - d.a[20] ^= bw[20] - } -} - -func copyOutUnaligned(d *state, buf []byte) { - ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0])) - copy(buf, ab[:]) -} - -var ( - xorIn = xorInUnaligned - copyOut = copyOutUnaligned -) - -const xorImplementationUnaligned = "unaligned" diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/net/LICENSE +++ b/vendor/golang.org/x/net/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go index 3a7e5ab1..885c4c59 100644 --- a/vendor/golang.org/x/net/html/doc.go +++ b/vendor/golang.org/x/net/html/doc.go @@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order: if err != nil { // ... } - var f func(*html.Node) - f = func(n *html.Node) { + for n := range doc.Descendants() { if n.Type == html.ElementNode && n.Data == "a" { // Do something with n... } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } } - f(doc) The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and diff --git a/vendor/golang.org/x/net/html/iter.go b/vendor/golang.org/x/net/html/iter.go new file mode 100644 index 00000000..54be8fd3 --- /dev/null +++ b/vendor/golang.org/x/net/html/iter.go @@ -0,0 +1,56 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package html + +import "iter" + +// Ancestors returns an iterator over the ancestors of n, starting with n.Parent. +// +// Mutating a Node or its parents while iterating may have unexpected results. +func (n *Node) Ancestors() iter.Seq[*Node] { + _ = n.Parent // eager nil check + + return func(yield func(*Node) bool) { + for p := n.Parent; p != nil && yield(p); p = p.Parent { + } + } +} + +// ChildNodes returns an iterator over the immediate children of n, +// starting with n.FirstChild. +// +// Mutating a Node or its children while iterating may have unexpected results. 
+func (n *Node) ChildNodes() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling { + } + } + +} + +// Descendants returns an iterator over all nodes recursively beneath +// n, excluding n itself. Nodes are visited in depth-first preorder. +// +// Mutating a Node or its descendants while iterating may have unexpected results. +func (n *Node) Descendants() iter.Seq[*Node] { + _ = n.FirstChild // eager nil check + + return func(yield func(*Node) bool) { + n.descendants(yield) + } +} + +func (n *Node) descendants(yield func(*Node) bool) bool { + for c := range n.ChildNodes() { + if !yield(c) || !c.descendants(yield) { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go index 1350eef2..77741a19 100644 --- a/vendor/golang.org/x/net/html/node.go +++ b/vendor/golang.org/x/net/html/node.go @@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode} // that it looks like "a maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 00000000..e3784123 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. 
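setDefault above is a small generic clamp: any configured value outside [minval, maxval], including the zero value of an unset field, is replaced by the default, which lets setConfigDefaults treat "unset" and "out of range" uniformly; adjustHTTP1MaxHeaderSize then pads an HTTP/1 header-bytes limit by an assumed 10 header fields at 32 bytes of per-field overhead each, so the net/http default of 1 MiB becomes 1048896. A runnable illustration of the clamp, with bounds taken from the code above:

	package main

	import "fmt"

	func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
		if *v < minval || *v > maxval {
			*v = defval
		}
	}

	func main() {
		streams := uint32(0) // unset
		setDefault(&streams, 1, 1<<32-1, 250)
		fmt.Println(streams) // 250, the package's defaultMaxStreams

		frame := uint32(1 << 30) // above the HTTP/2 maximum frame size
		setDefault(&frame, 16384, 1<<24-1, 1<<20)
		fmt.Println(frame) // 1048576
	}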
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 00000000..060fd6c6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. + +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 105c3b27..81faec7e 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1490,7 +1490,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { pf := mh.PseudoFields() for i, hf := range pf { switch hf.Name { - case ":method", ":path", ":scheme", ":authority": + case ":method", ":path", ":scheme", ":authority", ":protocol": isRequest = true case ":status": isResponse = true @@ -1498,7 +1498,7 @@ func (mh *MetaHeadersFrame) checkPseudos() error { return pseudoHeaderError(hf.Name) } // Check for duplicates. - // This would be a bad algorithm, but N is 4. + // This would be a bad algorithm, but N is 5. // And this doesn't allocate. 
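fillNetHTTPConfig above is the bridge from net/http's new (Go 1.24) HTTP2Config: every non-zero field overrides the corresponding http2Config value, with MaxReceiveBufferPerConnection/PerStream mapping onto the upload-buffer flow-control fields. A hedged wiring sketch, requiring a Go 1.24+ toolchain (cert paths are placeholders):

	package main

	import (
		"log"
		"net/http"
		"time"
	)

	func main() {
		// These values flow through fillNetHTTPConfig above into the
		// connection's SETTINGS; on older toolchains the field does not exist.
		srv := &http.Server{
			Addr: ":8443",
			HTTP2: &http.HTTP2Config{
				MaxConcurrentStreams: 250,
				MaxReadFrameSize:     1 << 20,
				PingTimeout:          15 * time.Second,
			},
		}
		log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
	}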
for _, hf2 := range pf[:i] { if hf.Name == hf2.Name { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6f2df281..c7601c90 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -17,24 +17,28 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" + "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" "strconv" "strings" "sync" + "time" "golang.org/x/net/http/httpguts" ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + disableExtendedConnectProtocol bool ) func init() { @@ -47,6 +51,9 @@ func init() { logFrameWrites = true logFrameReads = true } + if strings.Contains(e, "http2xconnect=0") { + disableExtendedConnectProtocol = true + } } const ( @@ -138,6 +145,10 @@ func (s Setting) Valid() error { if s.Val < 16384 || s.Val > 1<<24-1 { return ConnectionError(ErrCodeProtocol) } + case SettingEnableConnectProtocol: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } } return nil } @@ -147,21 +158,23 @@ func (s Setting) Valid() error { type SettingID uint16 const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 + SettingEnableConnectProtocol SettingID = 0x8 ) var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", + SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL", } func (s SettingID) String() string { @@ -210,12 +223,6 @@ type stringWriter interface { WriteString(s string) (n int, err error) } -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - // A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). type closeWaiter chan struct{} @@ -241,13 +248,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. 
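SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8, RFC 8441 extended CONNECT) added above is strictly boolean: Valid rejects any value other than 0 or 1 with a connection-level PROTOCOL_ERROR, alongside the existing range checks for the other settings. A quick check against the exported API, assuming this vendored x/net version:

	package main

	import (
		"fmt"

		"golang.org/x/net/http2"
	)

	func main() {
		for _, v := range []uint32{0, 1, 2} {
			s := http2.Setting{ID: http2.SettingEnableConnectProtocol, Val: v}
			fmt.Printf("%v valid: %v\n", s, s.Valid())
		}
		// 0 and 1 pass (nil error); 2 yields a PROTOCOL_ERROR connection error.
	}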
type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -274,7 +287,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -292,6 +305,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") @@ -383,3 +428,14 @@ func validPseudoPath(v string) bool { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() + +// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. +// It's defined as an interface here to let us keep synctestGroup entirely test-only +// and not a part of non-test builds. +type synctestGroupInterface interface { + Join() + Now() time.Time + NewTimer(d time.Duration) timer + AfterFunc(d time.Duration, f func()) timer + ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) +} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index c5d08108..b55547ae 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? 
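writeWithByteTimeout above implements WriteByteTimeout as a progress timeout rather than a whole-write deadline: the write deadline is re-armed after every partial write, so a large frame only fails if the peer accepts no bytes at all for the full interval. A standalone sketch of the same loop (the test-only synctest plumbing omitted), exercised over an in-memory net.Pipe:

	package main

	import (
		"errors"
		"fmt"
		"io"
		"net"
		"os"
		"time"
	)

	// writeAllWithProgressTimeout re-arms the write deadline whenever any
	// bytes go out; only a fully stalled connection trips the timeout.
	func writeAllWithProgressTimeout(conn net.Conn, timeout time.Duration, p []byte) (int, error) {
		if timeout <= 0 {
			return conn.Write(p)
		}
		n := 0
		for {
			conn.SetWriteDeadline(time.Now().Add(timeout))
			nn, err := conn.Write(p[n:])
			n += nn
			if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
				// Finished, made no progress, or hit a non-deadline error.
				conn.SetWriteDeadline(time.Time{})
				return n, err
			}
		}
	}

	func main() {
		c1, c2 := net.Pipe()
		go io.Copy(io.Discard, c2) // a draining peer: writes always progress
		n, err := writeAllWithProgressTimeout(c1, time.Second, make([]byte, 1<<20))
		fmt.Println(n, err) // 1048576 <nil>
	}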
+ + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -154,57 +175,39 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState -} - -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 + // Synchronization group used for testing. + // Outside of tests, this is nil. + group synctestGroupInterface } -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v +func (s *Server) markNewGoroutine() { + if s.group != nil { + s.group.Join() } - return defaultMaxReadFrameSize } -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v +func (s *Server) now() time.Time { + if s.group != nil { + return s.group.Now() } - return defaultMaxStreams + return time.Now() } -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (s *Server) newTimer(d time.Duration) timer { + if s.group != nil { + return s.group.NewTimer(d) } - return initialHeaderTableSize + return timeTimer{time.NewTimer(d)} } -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (s *Server) afterFunc(d time.Duration, f func()) timer { + if s.group != nil { + return s.group.AfterFunc(d, f) } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
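markNewGoroutine, now, newTimer and afterFunc above exist so tests can substitute a synthetic clock and timers: outside tests s.group is nil and each helper falls through to the real time package. The shape of the pattern, with hypothetical names:

	package main

	import (
		"fmt"
		"time"
	)

	// fakeClock stands in for the synctest group; nil means production.
	type fakeClock struct{ t time.Time }

	func (f *fakeClock) Now() time.Time { return f.t }

	type server struct {
		clk *fakeClock // nil outside tests
	}

	func (s *server) now() time.Time {
		if s.clk != nil {
			return s.clk.Now()
		}
		return time.Now()
	}

	func main() {
		prod := &server{}
		test := &server{clk: &fakeClock{t: time.Unix(0, 0)}}
		fmt.Println(prod.now().IsZero()) // false: the real clock
		fmt.Println(test.now().UTC())    // 1970-01-01 00:00:00 +0000 UTC
	}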
- return maxQueuedControlFrames + return timeTimer{time.AfterFunc(d, f)} } type serverInternalState struct { @@ -303,7 +306,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { if s.TLSNextProto == nil { s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) { if testHookOnConn != nil { testHookOnConn() } @@ -320,12 +323,31 @@ func ConfigureServer(s *http.Server, conf *Server) error { ctx = bc.BaseContext() } conf.ServeConn(c, &ServeConnOpts{ - Context: ctx, - Handler: h, - BaseConfig: hs, + Context: ctx, + Handler: h, + BaseConfig: hs, + SawClientPreface: sawClientPreface, }) } - s.TLSNextProto[NextProtoTLS] = protoHandler + s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + protoHandler(hs, c, h, false) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + // + // A connection passed in this method has already had the HTTP/2 preface read from it. + s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + if lg := hs.ErrorLog; lg != nil { + lg.Print(err) + } else { + log.Print(err) + } + go c.Close() + return + } + protoHandler(hs, nc, h, true) + } return nil } @@ -400,16 +422,22 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + s.serveConn(c, opts, nil) +} + +func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) { baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -419,13 +447,19 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, } + if newf != nil { + newf(sc) + } s.state.registerConn(sc) defer s.state.unregisterConn(sc) @@ -451,15 +485,15 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = 
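// [Illustrative sketch; not part of the vendored diff.] ServeConn remains the
// public entry point for serving plain-text HTTP/2 (h2c) on a raw listener;
// the SawClientPreface option plumbed through above is for callers that have
// already consumed the client preface, so a fresh connection leaves it false.
// serveH2C is our name:
func serveH2C(ln net.Listener, h http.Handler) error {
	s2 := &http2.Server{}
	for {
		c, err := ln.Accept()
		if err != nil {
			return err
		}
		go s2.ServeConn(c, &http2.ServeConnOpts{
			BaseConfig: &http.Server{Handler: h},
		})
	}
}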
hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -492,7 +526,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." @@ -529,7 +563,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -569,6 +603,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -588,6 +623,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -598,9 +634,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused + shutdownTimer timer // nil until used + idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -615,11 +656,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. 
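// [Our note, not upstream prose.] The adjustHTTP1MaxHeaderSize helper that
// replaces the inline math just below keeps the same formula: pad net/http's
// byte limit by 32 bytes of per-field overhead for an assumed 10 fields.
// With the default http.DefaultMaxHeaderBytes (1 << 20), the advertised
// SETTINGS_MAX_HEADER_LIST_SIZE works out to:
//
//	1048576 + 10*32 = 1048896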
- const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -649,12 +686,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline *time.Timer // nil if unused - writeDeadline *time.Timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline timer // nil if unused + writeDeadline timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -811,8 +848,9 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done + sc.srv.markNewGoroutine() + gate := make(chan struct{}) + gateDone := func() { gate <- struct{}{} } for { f, err := sc.framer.ReadFrame() select { @@ -843,6 +881,7 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { + sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -881,7 +920,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -893,20 +932,24 @@ func (sc *serverConn) serve() { sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) } + settings := writeSettings{ + {SettingMaxFrameSize, conf.MaxReadFrameSize}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, + } + if !disableExtendedConnectProtocol { + settings = append(settings, Setting{SettingEnableConnectProtocol, 1}) + } sc.writeFrame(FrameWriteRequest{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, - }, + write: settings, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -922,15 +965,22 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -944,6 +994,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -975,6 +1026,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -997,7 +1050,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1013,12 +1066,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
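// [Our summary of the ping state machine above, not upstream prose.]
//
//	t0                      last frame read from the peer
//	t0 + ReadIdleTimeout    readIdleTimer fires; handlePingTimer sends a
//	                        random 8-byte PING and rearms with PingTimeout
//	  + PingTimeout         no matching PING ack arrives -> conn.Close()
//	  ack arrives first     processPing clears pingSent and rearms the
//	                        timer with ReadIdleTimeout
//
// Frames received between arming and firing simply push the next ping out,
// via the pingAt/Reset branch at the top of handlePingTimer.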
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1026,6 +1106,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1057,10 +1138,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C: + case <-timer.C(): return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1278,6 +1359,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1425,7 +1510,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1552,6 +1637,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -1639,7 +1729,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout > 0 { + if sc.srv.IdleTimeout > 0 && sc.idleTimer != nil { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -1661,6 +1751,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { } } st.closeErr = err + st.cancelCtx() st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc sc.writeSched.CloseStream(st.id) } @@ -1714,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error { sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 case SettingMaxHeaderListSize: sc.peerMaxHeaderListSize = s.Val + case SettingEnableConnectProtocol: + // Receipt of this parameter by a server does not + // have any impact default: // Unknown setting: "An endpoint that receives a SETTINGS // frame with any unknown or unsupported identifier MUST @@ -2021,7 +2115,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. 
if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2117,9 +2211,9 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2144,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res scheme: f.PseudoValue("scheme"), authority: f.PseudoValue("authority"), path: f.PseudoValue("path"), + protocol: f.PseudoValue("protocol"), + } + + // extended connect is disabled, so we should not see :protocol + if disableExtendedConnectProtocol && rp.protocol != "" { + return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } isConnect := rp.method == "CONNECT" if isConnect { - if rp.path != "" || rp.scheme != "" || rp.authority == "" { + if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { @@ -2172,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res if rp.authority == "" { rp.authority = rp.header.Get("Host") } + if rp.protocol != "" { + rp.header.Set(":protocol", rp.protocol) + } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) if err != nil { @@ -2198,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res type requestParam struct { method string scheme, authority, path string + protocol string header http.Header } @@ -2239,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var url_ *url.URL var requestURI string - if rp.method == "CONNECT" { + if rp.method == "CONNECT" && rp.protocol == "" { url_ = &url.URL{Host: rp.authority} requestURI = rp.authority // mimic HTTP/1 server behavior } else { @@ -2343,6 +2447,7 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2639,7 +2744,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. 
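// [Illustrative sketch; not part of the vendored diff.] With the :protocol
// plumbing above, an RFC 8441 extended CONNECT (e.g. WebSocket over HTTP/2)
// reaches handlers as a CONNECT request carrying the pseudo-header in the
// regular header map (assumes imports "io" and "net/http"):
func tunnelHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method == "CONNECT" && r.Header.Get(":protocol") == "websocket" {
		w.WriteHeader(http.StatusOK) // accept the tunnel
		// From here the stream is full duplex: read from r.Body, write to w.
		// (A real handler would flush after each write.)
		io.Copy(w, r.Body) // toy echo tunnel
		return
	}
	http.Error(w, "expected extended CONNECT", http.StatusBadRequest)
}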
- date = time.Now().UTC().Format(http.TimeFormat) + date = rws.conn.srv.now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2761,7 +2866,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onReadTimeout() @@ -2777,9 +2882,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) + st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(time.Now())) + st.readDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil @@ -2787,7 +2892,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(time.Now()) { + if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2803,14 +2908,19 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) + st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(time.Now())) + st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) } }) return nil } +func (w *responseWriter) EnableFullDuplex() error { + // We always support full duplex responses, so this is a no-op. + return nil +} + func (w *responseWriter) Flush() { w.FlushError() } @@ -3257,7 +3367,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go deleted file mode 100644 index 61075bd1..00000000 --- a/vendor/golang.org/x/net/http2/testsync.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import ( - "context" - "sync" - "time" -) - -// testSyncHooks coordinates goroutines in tests. -// -// For example, a call to ClientConn.RoundTrip involves several goroutines, including: -// - the goroutine running RoundTrip; -// - the clientStream.doRequest goroutine, which writes the request; and -// - the clientStream.readLoop goroutine, which reads the response. -// -// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines -// are blocked waiting for some condition such as reading the Request.Body or waiting for -// flow control to become available. -// -// The testSyncHooks also manage timers and synthetic time in tests. 
-// This permits us to, for example, start a request and cause it to time out waiting for -// response headers without resorting to time.Sleep calls. -type testSyncHooks struct { - // active/inactive act as a mutex and condition variable. - // - // - neither chan contains a value: testSyncHooks is locked. - // - active contains a value: unlocked, and at least one goroutine is not blocked - // - inactive contains a value: unlocked, and all goroutines are blocked - active chan struct{} - inactive chan struct{} - - // goroutine counts - total int // total goroutines - condwait map[*sync.Cond]int // blocked in sync.Cond.Wait - blocked []*testBlockedGoroutine // otherwise blocked - - // fake time - now time.Time - timers []*fakeTimer - - // Transport testing: Report various events. - newclientconn func(*ClientConn) - newstream func(*clientStream) -} - -// testBlockedGoroutine is a blocked goroutine. -type testBlockedGoroutine struct { - f func() bool // blocked until f returns true - ch chan struct{} // closed when unblocked -} - -func newTestSyncHooks() *testSyncHooks { - h := &testSyncHooks{ - active: make(chan struct{}, 1), - inactive: make(chan struct{}, 1), - condwait: map[*sync.Cond]int{}, - } - h.inactive <- struct{}{} - h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) - return h -} - -// lock acquires the testSyncHooks mutex. -func (h *testSyncHooks) lock() { - select { - case <-h.active: - case <-h.inactive: - } -} - -// waitInactive waits for all goroutines to become inactive. -func (h *testSyncHooks) waitInactive() { - for { - <-h.inactive - if !h.unlock() { - break - } - } -} - -// unlock releases the testSyncHooks mutex. -// It reports whether any goroutines are active. -func (h *testSyncHooks) unlock() (active bool) { - // Look for a blocked goroutine which can be unblocked. - blocked := h.blocked[:0] - unblocked := false - for _, b := range h.blocked { - if !unblocked && b.f() { - unblocked = true - close(b.ch) - } else { - blocked = append(blocked, b) - } - } - h.blocked = blocked - - // Count goroutines blocked on condition variables. - condwait := 0 - for _, count := range h.condwait { - condwait += count - } - - if h.total > condwait+len(blocked) { - h.active <- struct{}{} - return true - } else { - h.inactive <- struct{}{} - return false - } -} - -// goRun starts a new goroutine. -func (h *testSyncHooks) goRun(f func()) { - h.lock() - h.total++ - h.unlock() - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - f() - }() -} - -// blockUntil indicates that a goroutine is blocked waiting for some condition to become true. -// It waits until f returns true before proceeding. -// -// Example usage: -// -// h.blockUntil(func() bool { -// // Is the context done yet? -// select { -// case <-ctx.Done(): -// default: -// return false -// } -// return true -// }) -// // Wait for the context to become done. -// <-ctx.Done() -// -// The function f passed to blockUntil must be non-blocking and idempotent. -func (h *testSyncHooks) blockUntil(f func() bool) { - if f() { - return - } - ch := make(chan struct{}) - h.lock() - h.blocked = append(h.blocked, &testBlockedGoroutine{ - f: f, - ch: ch, - }) - h.unlock() - <-ch -} - -// broadcast is sync.Cond.Broadcast. -func (h *testSyncHooks) condBroadcast(cond *sync.Cond) { - h.lock() - delete(h.condwait, cond) - h.unlock() - cond.Broadcast() -} - -// broadcast is sync.Cond.Wait. -func (h *testSyncHooks) condWait(cond *sync.Cond) { - h.lock() - h.condwait[cond]++ - h.unlock() -} - -// newTimer creates a new fake timer. 
-func (h *testSyncHooks) newTimer(d time.Duration) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - c: make(chan time.Time), - } - h.timers = append(h.timers, t) - return t -} - -// afterFunc creates a new fake AfterFunc timer. -func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer { - h.lock() - defer h.unlock() - t := &fakeTimer{ - hooks: h, - when: h.now.Add(d), - f: f, - } - h.timers = append(h.timers, t) - return t -} - -func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(ctx) - t := h.afterFunc(d, cancel) - return ctx, func() { - t.Stop() - cancel() - } -} - -func (h *testSyncHooks) timeUntilEvent() time.Duration { - h.lock() - defer h.unlock() - var next time.Time - for _, t := range h.timers { - if next.IsZero() || t.when.Before(next) { - next = t.when - } - } - if d := next.Sub(h.now); d > 0 { - return d - } - return 0 -} - -// advance advances time and causes synthetic timers to fire. -func (h *testSyncHooks) advance(d time.Duration) { - h.lock() - defer h.unlock() - h.now = h.now.Add(d) - timers := h.timers[:0] - for _, t := range h.timers { - t := t // remove after go.mod depends on go1.22 - t.mu.Lock() - switch { - case t.when.After(h.now): - timers = append(timers, t) - case t.when.IsZero(): - // stopped timer - default: - t.when = time.Time{} - if t.c != nil { - close(t.c) - } - if t.f != nil { - h.total++ - go func() { - defer func() { - h.lock() - h.total-- - h.unlock() - }() - t.f() - }() - } - } - t.mu.Unlock() - } - h.timers = timers -} - -// A timer wraps a time.Timer, or a synthetic equivalent in tests. -// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires. -type timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// timeTimer implements timer using real time. -type timeTimer struct { - t *time.Timer - c chan time.Time -} - -// newTimeTimer creates a new timer using real time. -func newTimeTimer(d time.Duration) timer { - ch := make(chan time.Time) - t := time.AfterFunc(d, func() { - close(ch) - }) - return &timeTimer{t, ch} -} - -// newTimeAfterFunc creates an AfterFunc timer using real time. -func newTimeAfterFunc(d time.Duration, f func()) timer { - return &timeTimer{ - t: time.AfterFunc(d, f), - } -} - -func (t timeTimer) C() <-chan time.Time { return t.c } -func (t timeTimer) Stop() bool { return t.t.Stop() } -func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) } - -// fakeTimer implements timer using fake time. 
-type fakeTimer struct { - hooks *testSyncHooks - - mu sync.Mutex - when time.Time // when the timer will fire - c chan time.Time // closed when the timer fires; mutually exclusive with f - f func() // called when the timer fires; mutually exclusive with c -} - -func (t *fakeTimer) C() <-chan time.Time { return t.c } - -func (t *fakeTimer) Stop() bool { - t.mu.Lock() - defer t.mu.Unlock() - stopped := t.when.IsZero() - t.when = time.Time{} - return stopped -} - -func (t *fakeTimer) Reset(d time.Duration) bool { - if t.c != nil || t.f == nil { - panic("fakeTimer only supports Reset on AfterFunc timers") - } - t.mu.Lock() - defer t.mu.Unlock() - t.hooks.lock() - defer t.hooks.unlock() - active := !t.when.IsZero() - t.when = t.hooks.now.Add(d) - if !active { - t.hooks.timers = append(t.hooks.timers, t) - } - return active -} diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go new file mode 100644 index 00000000..0b1c17b8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/timer.go @@ -0,0 +1,20 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import "time" + +// A timer is a time.Timer, as an interface which can be replaced in tests. +type timer = interface { + C() <-chan time.Time + Reset(d time.Duration) bool + Stop() bool +} + +// timeTimer adapts a time.Timer to the timer interface. +type timeTimer struct { + *time.Timer +} + +func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 2fa49490..090d0e1b 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -185,42 +184,80 @@ type Transport struct { connPoolOnce sync.Once connPoolOrDef ClientConnPool // non-nil version of ConnPool - syncHooks *testSyncHooks + *transportTestHooks } -func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { - return 10 << 20 +// Hook points used for testing. +// Outside of tests, t.transportTestHooks is nil and these all have minimal implementations. +// Inside tests, see the testSyncHooks function docs. + +type transportTestHooks struct { + newclientconn func(*ClientConn) + group synctestGroupInterface +} + +func (t *Transport) markNewGoroutine() { + if t != nil && t.transportTestHooks != nil { + t.transportTestHooks.group.Join() } - if t.MaxHeaderListSize == 0xffffffff { - return 0 +} + +func (t *Transport) now() time.Time { + if t != nil && t.transportTestHooks != nil { + return t.transportTestHooks.group.Now() } - return t.MaxHeaderListSize + return time.Now() } -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer +func (t *Transport) timeSince(when time.Time) time.Duration { + if t != nil && t.transportTestHooks != nil { + return t.now().Sub(when) } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize + return time.Since(when) +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. 
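// [Illustrative sketch; not part of the vendored diff.] Because timer is now
// a plain interface (see the new timer.go above), tests can substitute a
// manual implementation without the deleted testSyncHooks machinery. A
// minimal hand-rolled fake (our names throughout):
type manualTimer struct {
	ch      chan time.Time
	stopped bool
}

func newManualTimer() *manualTimer         { return &manualTimer{ch: make(chan time.Time, 1)} }
func (t *manualTimer) C() <-chan time.Time { return t.ch }
func (t *manualTimer) Stop() bool {
	active := !t.stopped
	t.stopped = true
	return active
}
func (t *manualTimer) Reset(d time.Duration) bool {
	active := !t.stopped
	t.stopped = false
	return active
}
func (t *manualTimer) fire() { t.ch <- time.Now() } // test hook: force expiry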
+func (t *Transport) newTimer(d time.Duration) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.NewTimer(d) } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize + return timeTimer{time.NewTimer(d)} +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (t *Transport) afterFunc(d time.Duration, f func()) timer { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.AfterFunc(d, f) } - return t.MaxReadFrameSize + return timeTimer{time.AfterFunc(d, f)} } -func (t *Transport) disableCompression() bool { - return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if t.transportTestHooks != nil { + return t.transportTestHooks.group.ContextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second +func (t *Transport) maxHeaderListSize() uint32 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } } - return t.PingTimeout + if n <= 0 { + return 10 << 20 + } + if n >= 0xffffffff { + return 0 + } + return uint32(n) +} +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. @@ -258,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) + upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper { + addr := authorityAddr(scheme, authority) if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { go c.Close() return erringRoundTripper{err} @@ -270,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) { // was unknown) go c.Close() } + if scheme == "http" { + return (*unencryptedTransport)(t2) + } return t2 } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, + if t1.TLSNextProto == nil { + t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper) + } + t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper { + return upgradeFn("https", authority, c) + } + // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns. + t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper { + nc, err := unencryptedNetConnFromTLSConn(c) + if err != nil { + go c.Close() + return erringRoundTripper{err} } - } else { - m["h2"] = upgradeFn + return upgradeFn("http", authority, nc) } return t2, nil } +// unencryptedTransport is a Transport with a RoundTrip method that +// always permits http:// URLs. 
+type unencryptedTransport Transport + +func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true}) +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -301,7 +357,7 @@ type ClientConn struct { t *Transport tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls - reused uint32 // whether conn is being reused; atomic + atomicReused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request getConnCalled bool // used by clientConnPool @@ -312,31 +368,54 @@ type ClientConn struct { idleTimeout time.Duration // or 0 for never idleTimer timer - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow outflow // our conn-level flow control quota (cs.outflow is per stream) - inflow inflow // peer's conn-level flow control - doNotReuse bool // whether conn is marked to not be reused for any future requests - closing bool - closed bool - seenSettings bool // true if we've seen a settings frame, false otherwise - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip - nextStreamID uint32 - pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams - pings map[[8]byte]chan struct{} // in flight ping data to notification channel - br *bufio.Reader - lastActive time.Time - lastIdle time.Time // time last idle + mu sync.Mutex // guards following + cond *sync.Cond // hold mu; broadcast on flow/closed changes + flow outflow // our conn-level flow control quota (cs.outflow is per stream) + inflow inflow // peer's conn-level flow control + doNotReuse bool // whether conn is marked to not be reused for any future requests + closing bool + closed bool + seenSettings bool // true if we've seen a settings frame, false otherwise + seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails + wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back + goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received + goAwayDebug string // goAway frame's debug data, retained as a string + streams map[uint32]*clientStream // client-initiated + streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip + nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams + pings map[[8]byte]chan struct{} // in flight ping data to notification channel + br *bufio.Reader + lastActive time.Time + lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration + extendedConnectAllowed bool + + // rstStreamPingsBlocked works around an unfortunate gRPC behavior. 
+ // gRPC strictly limits the number of PING frames that it will receive. + // The default is two pings per two hours, but the limit resets every time + // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575. + // + // rstStreamPingsBlocked is set after receiving a response to a PING frame + // bundled with an RST_STREAM (see pendingResets below), and cleared after + // receiving a HEADERS or DATA frame. + rstStreamPingsBlocked bool + + // pendingResets is the number of RST_STREAM frames we have sent to the peer, + // without confirming that the peer has received them. When we send a RST_STREAM, + // we bundle it with a PING frame, unless a PING is already in flight. We count + // the reset stream against the connection's concurrency limit until we get + // a PING response. This limits the number of requests we'll try to send to a + // completely unresponsive connection. + pendingResets int // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -352,60 +431,6 @@ type ClientConn struct { werr error // first write error that has occurred hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder - - syncHooks *testSyncHooks // can be nil -} - -// Hook points used for testing. -// Outside of tests, cc.syncHooks is nil and these all have minimal implementations. -// Inside tests, see the testSyncHooks function docs. - -// goRun starts a new goroutine. -func (cc *ClientConn) goRun(f func()) { - if cc.syncHooks != nil { - cc.syncHooks.goRun(f) - return - } - go f() -} - -// condBroadcast is cc.cond.Broadcast. -func (cc *ClientConn) condBroadcast() { - if cc.syncHooks != nil { - cc.syncHooks.condBroadcast(cc.cond) - } - cc.cond.Broadcast() -} - -// condWait is cc.cond.Wait. -func (cc *ClientConn) condWait() { - if cc.syncHooks != nil { - cc.syncHooks.condWait(cc.cond) - } - cc.cond.Wait() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (cc *ClientConn) newTimer(d time.Duration) timer { - if cc.syncHooks != nil { - return cc.syncHooks.newTimer(d) - } - return newTimeTimer(d) -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { - if cc.syncHooks != nil { - return cc.syncHooks.afterFunc(d, f) - } - return newTimeAfterFunc(d, f) -} - -func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if cc.syncHooks != nil { - return cc.syncHooks.contextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. 
One of these @@ -448,12 +473,12 @@ type clientStream struct { sentHeaders bool // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - num1xx uint8 // number of 1xx responses seen - readClosed bool // peer sent an END_STREAM flag - readAborted bool // read loop reset the stream + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second MetaHeadersFrame (trailers) + readClosed bool // peer sent an END_STREAM flag + readAborted bool // read loop reset the stream + totalHeaderSize int64 // total size of 1xx headers seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer @@ -487,7 +512,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. - cs.cc.condBroadcast() + cs.cc.cond.Broadcast() } } @@ -497,7 +522,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.condBroadcast() + cc.cond.Broadcast() } } @@ -507,13 +532,15 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - cs.cc.goRun(func() { + go func() { + cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) - }) + }() } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -523,22 +550,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -569,6 +583,8 @@ type RoundTripOpt struct { // no cached connection is available, RoundTripOpt // will return ErrNoCachedConn. OnlyCachedConn bool + + allowHTTP bool // allow http:// URLs } func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { @@ -601,7 +617,14 @@ func authorityAddr(scheme string, authority string) (addr string) { // RoundTripOpt is like RoundTrip, but takes options. func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { + switch req.URL.Scheme { + case "https": + // Always okay. 
+ case "http": + if !t.AllowHTTP && !opt.allowHTTP { + return nil, errors.New("http2: unencrypted HTTP/2 not enabled") + } + default: return nil, errors.New("http2: unsupported scheme") } @@ -612,7 +635,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } - reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1) + reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1) traceGotConn(req, cc, reused) res, err := cc.RoundTrip(req) if err != nil && retry <= 6 { @@ -626,21 +649,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - var tm timer - if t.syncHooks != nil { - tm = t.syncHooks.newTimer(d) - t.syncHooks.blockUntil(func() bool { - select { - case <-tm.C(): - case <-req.Context().Done(): - default: - return false - } - return true - }) - } else { - tm = newTimeTimer(d) - } + tm := t.newTimer(d) select { case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) @@ -651,6 +660,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } } } + if err == errClientConnNotEstablished { + // This ClientConn was created recently, + // this is the first request to use it, + // and the connection is closed and not usable. + // + // In this state, cc.idleTimer will remove the conn from the pool + // when it fires. Stop the timer and remove it here so future requests + // won't try to use this connection. + // + // If the timer has already fired and we're racing it, the redundant + // call to MarkDead is harmless. 
+ if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + t.connPool().MarkDead(cc) + } if err != nil { t.vlogf("RoundTrip failure: %v", err) return nil, err @@ -669,9 +694,10 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnNotEstablished = errors.New("http2: client conn could not be established") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -725,8 +751,8 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { - if t.syncHooks != nil { - return t.newClientConn(nil, singleUse, t.syncHooks) + if t.transportTestHooks != nil { + return t.newClientConn(nil, singleUse) } host, _, err := net.SplitHostPort(addr) if err != nil { @@ -736,7 +762,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse, nil) + return t.newClientConn(tconn, singleUse) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -787,48 +813,38 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives(), nil) + return t.newClientConn(c, t.disableKeepAlives()) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - syncHooks: hooks, - } - if hooks != nil { - hooks.newclientconn(cc) + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + seenSettingsChan: make(chan struct{}), + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + lastActive: t.now(), + } + var group synctestGroupInterface + if t.transportTestHooks != nil { + t.markNewGoroutine() + t.transportTestHooks.newclientconn(cc) c = cc.tconn - } - if d := t.idleConnTimeout(); d != 0 { - cc.idleTimeout = d - cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -840,30 +856,25 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize - if t.AllowHTTP { - cc.nextStreamID = 3 - } - if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -871,11 +882,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -885,23 +894,29 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHoo cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() return nil, cc.werr } - cc.goRun(cc.readLoop) + // Start the idle timer after the connection is fully initialized. 
+ if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + } + + go cc.readLoop() return cc, nil } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1026,7 +1041,7 @@ func (cc *ClientConn) State() ClientConnState { return ClientConnState{ Closed: cc.closed, Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil, - StreamsActive: len(cc.streams), + StreamsActive: len(cc.streams) + cc.pendingResets, StreamsReserved: cc.streamsReserved, StreamsPending: cc.pendingRequests, LastIdle: cc.lastIdle, @@ -1058,16 +1073,38 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // writing it. maxConcurrentOkay = true } else { - maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams) + // We can take a new request if the total of + // - active streams; + // - reservation slots for new streams; and + // - streams for which we have sent a RST_STREAM and a PING, + // but received no subsequent frame + // is less than the concurrency limit. + maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && !cc.doNotReuse && int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && !cc.tooIdleLocked() + + // If this connection has never been used for a request and is closed, + // then let it take a request (which will fail). + // + // This avoids a situation where an error early in a connection's lifetime + // goes unreported. + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + st.canTakeNewRequest = true + } + return } +// currentRequestCountLocked reports the number of concurrency slots currently in use, +// including active streams, reserved slots, and reset streams waiting for acknowledgement. +func (cc *ClientConn) currentRequestCountLocked() int { + return len(cc.streams) + cc.streamsReserved + cc.pendingResets +} + func (cc *ClientConn) canTakeNewRequestLocked() bool { st := cc.idleStateLocked() return st.canTakeNewRequest @@ -1080,7 +1117,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. 
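// [Illustrative sketch; not part of the vendored diff.] The client-side
// counterparts of the server knobs: Transport.ReadIdleTimeout feeds
// conf.SendPingTimeout (when to send the health-check PING) and PingTimeout
// bounds the wait for its ack, as consumed by healthCheck above:
var t2 = &http2.Transport{
	ReadIdleTimeout: 30 * time.Second, // ping after 30s without frames
	PingTimeout:     10 * time.Second, // close the conn if the ack takes >10s
}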
It will @@ -1144,7 +1181,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1156,9 +1194,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.condWait() + cc.cond.Wait() } - }) + }() shutdownEnterWaitStateHook() select { case <-done: @@ -1168,7 +1206,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() return ctx.Err() } @@ -1206,7 +1244,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() cc.closeConn() } @@ -1321,23 +1359,30 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - cc.goRun(func() { - cs.doRequest(req) - }) + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + !cs.isHead { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + cs.requestedGzip = true + } + + go cs.doRequest(req, streamf) waitDone := func() error { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.donec: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.donec: return nil @@ -1398,24 +1443,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) return err } - if streamf != nil { - streamf(cs) - } - for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1445,11 +1473,14 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // doRequest runs for the duration of the request lifetime. // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). -func (cs *clientStream) doRequest(req *http.Request) { - err := cs.writeRequest(req) +func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { + cs.cc.t.markNewGoroutine() + err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } +var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer") + // writeRequest sends a request. // // It returns nil after the request is written, the response read, @@ -1457,7 +1488,7 @@ func (cs *clientStream) doRequest(req *http.Request) { // // It returns non-nil if the request ends otherwise. 
// If the returned error is StreamError, the error Code may be used in resetting the stream. -func (cs *clientStream) writeRequest(req *http.Request) (err error) { +func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) { cc := cs.cc ctx := cs.ctx @@ -1465,26 +1496,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { return err } + // wait for setting frames to be received, a server can change this value later, + // but we just wait for the first settings frame + var isExtendedConnect bool + if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" { + isExtendedConnect = true + } + // Acquire the new-request lock by writing to reqHeaderMu. // This lock guards the critical section covering allocating a new stream ID // (requires mu) and creating the stream (requires wmu). if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } - var newStreamHook func(*clientStream) - if cc.syncHooks != nil { - newStreamHook = cc.syncHooks.newstream - cc.syncHooks.blockUntil(func() bool { - select { - case cc.reqHeaderMu <- struct{}{}: - <-cc.reqHeaderMu - case <-cs.reqCancel: - case <-ctx.Done(): - default: - return false + if isExtendedConnect { + select { + case <-cs.reqCancel: + return errRequestCanceled + case <-ctx.Done(): + return ctx.Err() + case <-cc.seenSettingsChan: + if !cc.extendedConnectAllowed { + return errExtendedConnectNotSupported } - return true - }) + } } select { case cc.reqHeaderMu <- struct{}{}: @@ -1510,28 +1545,8 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() - if newStreamHook != nil { - newStreamHook(cs) - } - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true + if streamf != nil { + streamf(cs) } continueTimeout := cc.t.expectContinueTimeout() @@ -1594,7 +1609,7 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.newTimer(d) + timer := cc.t.newTimer(d) defer timer.Stop() respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv @@ -1603,21 +1618,6 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { // or until the request is aborted (via context, error, or otherwise), // whichever comes first. 
for { - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-cs.peerClosed: - case <-respHeaderTimer: - case <-respHeaderRecv: - case <-cs.abort: - case <-ctx.Done(): - case <-cs.reqCancel: - default: - return false - } - return true - }) - } select { case <-cs.peerClosed: return nil @@ -1702,6 +1702,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { cs.reqBodyClosed = make(chan struct{}) } bodyClosed := cs.reqBodyClosed + closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil cc.mu.Unlock() if mustCloseBody { cs.reqBody.Close() @@ -1726,16 +1727,44 @@ func (cs *clientStream) cleanupWriteRequest(err error) { if cs.sentHeaders { if se, ok := err.(StreamError); ok { if se.Cause != errFromPeer { - cc.writeStreamReset(cs.ID, se.Code, err) + cc.writeStreamReset(cs.ID, se.Code, false, err) } } else { - cc.writeStreamReset(cs.ID, ErrCodeCancel, err) + // We're cancelling an in-flight request. + // + // This could be due to the server becoming unresponsive. + // To avoid sending too many requests on a dead connection, + // we let the request continue to consume a concurrency slot + // until we can confirm the server is still responding. + // We do this by sending a PING frame along with the RST_STREAM + // (unless a ping is already in flight). + // + // For simplicity, we don't bother tracking the PING payload: + // We reset cc.pendingResets any time we receive a PING ACK. + // + // We skip this if the conn is going to be closed on idle, + // because it's short lived and will probably be closed before + // we get the ping response. + ping := false + if !closeOnIdle { + cc.mu.Lock() + // rstStreamPingsBlocked works around a gRPC behavior: + // see comment on the field for details. + if !cc.rstStreamPingsBlocked { + if cc.pendingResets == 0 { + ping = true + } + cc.pendingResets++ + } + cc.mu.Unlock() + } + cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err) } } cs.bufPipe.CloseWithError(err) // no-op if already closed } else { if cs.sentHeaders && !cs.sentEndStream { - cc.writeStreamReset(cs.ID, ErrCodeNo, nil) + cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil) } cs.bufPipe.CloseWithError(errRequestCanceled) } @@ -1757,16 +1786,21 @@ func (cs *clientStream) cleanupWriteRequest(err error) { // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { - cc.lastActive = time.Now() + if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 { + // This is the very first request sent to this connection. + // Return a fatal error which aborts the retry loop. 
+ return errClientConnNotEstablished + } + cc.lastActive = cc.t.now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } cc.lastIdle = time.Time{} - if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) { + if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) { return nil } cc.pendingRequests++ - cc.condWait() + cc.cond.Wait() cc.pendingRequests-- select { case <-cs.abort: @@ -2028,13 +2062,13 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.condWait() + cc.cond.Wait() } } func validateHeaders(hdrs http.Header) string { for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) } for _, v := range vv { @@ -2050,6 +2084,10 @@ func validateHeaders(hdrs http.Header) string { var errNilRequestURL = errors.New("http2: Request.URI is nil") +func isNormalConnect(req *http.Request) bool { + return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" +} + // requires cc.wmu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -2070,7 +2108,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } var path string - if req.Method != "CONNECT" { + if !isNormalConnect(req) { path = req.URL.RequestURI() if !validPseudoPath(path) { orig := path @@ -2107,7 +2145,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail m = http.MethodGet } f(":method", m) - if req.Method != "CONNECT" { + if !isNormalConnect(req) { f(":path", path) f(":scheme", req.URL.Scheme) } @@ -2288,7 +2326,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2304,14 +2342,14 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = time.Now() + cc.lastActive = cc.t.now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() + cc.lastIdle = cc.t.now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. - cc.condBroadcast() + cc.cond.Broadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2333,6 +2371,7 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *ClientConn) readLoop() { + cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2366,7 +2405,6 @@ func isEOFOrNetReadError(err error) bool { func (rl *clientConnReadLoop) cleanup() { cc := rl.cc - cc.t.connPool().MarkDead(cc) defer cc.closeConn() defer close(cc.readerDone) @@ -2390,6 +2428,24 @@ func (rl *clientConnReadLoop) cleanup() { } cc.closed = true + // If the connection has never been used, and has been open for only a short time, + // leave it in the connection pool for a little while. 
+ // + // This avoids a situation where new connections are constantly created, + // added to the pool, fail, and are removed from the pool, without any error + // being surfaced to the user. + const unusedWaitTime = 5 * time.Second + idleTime := cc.t.now().Sub(cc.lastActive) + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.t.connPool().MarkDead(cc) + }) + } else { + cc.mu.Unlock() // avoid any deadlocks in MarkDead + cc.t.connPool().MarkDead(cc) + cc.mu.Lock() + } + for _, cs := range cc.streams { select { case <-cs.peerClosed: @@ -2399,7 +2455,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.condBroadcast() + cc.cond.Broadcast() cc.mu.Unlock() } @@ -2433,10 +2489,10 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { - t = cc.afterFunc(readIdleTimeout, cc.healthCheck) + t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2447,7 +2503,7 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := rl.streamByID(se.StreamID); cs != nil { + if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil { if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -2493,13 +2549,16 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } + if !cc.seenSettings { + close(cc.seenSettingsChan) + } return err } } } func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. So if this @@ -2617,15 +2676,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra if f.StreamEnded() { return nil, errors.New("1xx informational response with END_STREAM flag") } - cs.num1xx++ - const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http - if cs.num1xx > max1xxResponses { - return nil, errors.New("http2: too many 1xx informational responses") - } if fn := cs.get1xxTraceFunc(); fn != nil { + // If the 1xx response is being delivered to the user, + // then they're responsible for limiting the number + // of responses. if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { return nil, err } + } else { + // If the user didn't examine the 1xx response, then we + // limit the size of all 1xx headers. + // + // This differs a bit from the HTTP/1 implementation, which + // limits the size of all 1xx headers plus the final response. + // Use the larger limit of MaxHeaderListSize and + // net/http.Transport.MaxResponseHeaderBytes. 
+ limit := int64(cs.cc.t.maxHeaderListSize()) + if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit { + limit = t1.MaxResponseHeaderBytes + } + for _, h := range f.Fields { + cs.totalHeaderSize += int64(h.Size()) + } + if cs.totalHeaderSize > limit { + if VerboseLogs { + log.Printf("http2: 1xx informational responses too large") + } + return nil, errors.New("header list too large") + } } if statusCode == 100 { traceGot100Continue(cs.trace) @@ -2809,7 +2887,7 @@ func (b transportResponseBody) Close() error { func (rl *clientConnReadLoop) processData(f *DataFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, headerOrDataFrame) data := f.Data() if cs == nil { cc.mu.Lock() @@ -2944,9 +3022,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { cs.abortStream(err) } -func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream { +// Constants passed to streamByID for documentation purposes. +const ( + headerOrDataFrame = true + notHeaderOrDataFrame = false +) + +// streamByID returns the stream with the given id, or nil if no stream has that id. +// If headerOrData is true, it clears rstStreamPingsBlocked. +func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream { rl.cc.mu.Lock() defer rl.cc.mu.Unlock() + if headerOrData { + // Work around an unfortunate gRPC behavior. + // See comment on ClientConn.rstStreamPingsBlocked for details. + rl.cc.rstStreamPingsBlocked = false + } cs := rl.cc.streams[id] if cs != nil && !cs.readAborted { return cs @@ -3034,12 +3125,27 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.condBroadcast() + cc.cond.Broadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: cc.henc.SetMaxDynamicTableSize(s.Val) cc.peerMaxHeaderTableSize = s.Val + case SettingEnableConnectProtocol: + if err := s.Valid(); err != nil { + return err + } + // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL, + // we require that it do so in the first SETTINGS frame. + // + // When we attempt to use extended CONNECT, we wait for the first + // SETTINGS frame to see if the server supports it. If we let the + // server enable the feature with a later SETTINGS frame, then + // users will see inconsistent results depending on whether we've + // seen that frame or not. + if !cc.seenSettings { + cc.extendedConnectAllowed = s.Val == 1 + } default: cc.vlogf("Unhandled Setting: %v", s) } @@ -3057,6 +3163,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { // connection can establish to our default.
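(Editorial note: the SETTINGS handling above honors SETTINGS_ENABLE_CONNECT_PROTOCOL only from the server's first SETTINGS frame, and the writeRequest change earlier in this diff blocks such requests on seenSettingsChan. A small sketch of what qualifies as an extended CONNECT per RFC 8441; illustrative, not the vendored code.)

```go
package main

import (
	"fmt"
	"net/http"
)

// isExtendedConnect reports whether req is an RFC 8441 extended CONNECT:
// a CONNECT request that carries a ":protocol" pseudo-header.
func isExtendedConnect(req *http.Request) bool {
	return req.Method == "CONNECT" && req.Header.Get(":protocol") != ""
}

func main() {
	req, _ := http.NewRequest("CONNECT", "https://example.com", nil)
	req.Header.Set(":protocol", "websocket") // e.g. WebSockets over HTTP/2
	fmt.Println(isExtendedConnect(req))      // true: must wait for the first SETTINGS frame
}
```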
cc.maxConcurrentStreams = defaultMaxConcurrentStreams } + close(cc.seenSettingsChan) cc.seenSettings = true } @@ -3065,7 +3172,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { cc := rl.cc - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if f.StreamID != 0 && cs == nil { return nil } @@ -3089,12 +3196,12 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { return ConnectionError(ErrCodeFlowControl) } - cc.condBroadcast() + cc.cond.Broadcast() return nil } func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.streamByID(f.StreamID) + cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame) if cs == nil { // TODO: return error if server tries to RST_STREAM an idle stream return nil @@ -3133,7 +3240,8 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } var pingError error errc := make(chan struct{}) - cc.goRun(func() { + go func() { + cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3144,20 +3252,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error { close(errc) return } - }) - if cc.syncHooks != nil { - cc.syncHooks.blockUntil(func() bool { - select { - case <-c: - case <-errc: - case <-ctx.Done(): - case <-cc.readerDone: - default: - return false - } - return true - }) - } + }() select { case <-c: return nil @@ -3181,6 +3276,12 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error { close(c) delete(cc.pings, f.Data) } + if cc.pendingResets > 0 { + // See clientStream.cleanupWriteRequest. + cc.pendingResets = 0 + cc.rstStreamPingsBlocked = true + cc.cond.Broadcast() + } return nil } cc := rl.cc @@ -3203,13 +3304,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { return ConnectionError(ErrCodeProtocol) } -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { +// writeStreamReset sends a RST_STREAM frame. +// When ping is true, it also sends a PING frame with a random payload. +func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) { // TODO: map err to more interesting error codes, once the // HTTP community comes up with some. But currently for // RST_STREAM there's no equivalent to GOAWAY frame's debug // data, and the error codes are all pretty vague ("cancel"). cc.wmu.Lock() cc.fr.WriteRSTStream(streamID, code) + if ping { + var payload [8]byte + rand.Read(payload[:]) + cc.fr.WritePing(false, payload) + } cc.bw.Flush() cc.wmu.Unlock() } @@ -3363,7 +3471,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Since(cc.lastActive) + ci.IdleTime = cc.t.timeSince(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go new file mode 100644 index 00000000..b2de2116 --- /dev/null +++ b/vendor/golang.org/x/net/http2/unencrypted.go @@ -0,0 +1,32 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http2 + +import ( + "crypto/tls" + "errors" + "net" +) + +const nextProtoUnencryptedHTTP2 = "unencrypted_http2" + +// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn. +// +// TLSNextProto functions accept a *tls.Conn. +// +// When passing an unencrypted HTTP/2 connection to a TLSNextProto function, +// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection. +// To be extra careful about mistakes (accidentally dropping TLS encryption in a place +// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method +// that returns the actual connection we want to use. +func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) { + conner, ok := tc.NetConn().(interface { + UnencryptedNetConn() net.Conn + }) + if !ok { + return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff") + } + return conner.UnencryptedNetConn(), nil +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398..6ff6bee7 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 0a242c66..f6783339 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -443,8 +443,8 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max } func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { - for k := n.kids; k != nil; k = k.next { - k.setParent(n.parent) + for n.kids != nil { + n.kids.setParent(n.parent) } n.setParent(nil) delete(ws.nodes, n.id) diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/sys/LICENSE +++ b/vendor/golang.org/x/sys/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s new file mode 100644 index 00000000..ec2acfe5 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/asm_darwin_x86_gc.s @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin && amd64 && gc + +#include "textflag.h" + +TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) +GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) + +TEXT libc_sysctlbyname_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_sysctlbyname(SB) +GLOBL ·libc_sysctlbyname_trampoline_addr(SB), RODATA, $8 +DATA ·libc_sysctlbyname_trampoline_addr(SB)/8, $libc_sysctlbyname_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 8fa707aa..02609d5b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -105,6 +105,8 @@ var ARM64 struct { HasSVE bool // Scalable Vector Extensions HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + HasDIT bool // Data Independent Timing support + HasI8MM bool // Advanced SIMD Int8 matrix multiplication instructions _ CacheLinePad } @@ -199,6 +201,25 @@ var S390X struct { _ CacheLinePad } +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. +var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + _ CacheLinePad +} + func init() { archInit() initOptions() diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index 0e27a21e..af2aa99f 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -38,6 +38,8 @@ func initOptions() { {Name: "dcpop", Feature: &ARM64.HasDCPOP}, {Name: "asimddp", Feature: &ARM64.HasASIMDDP}, {Name: "asimdfhm", Feature: &ARM64.HasASIMDFHM}, + {Name: "dit", Feature: &ARM64.HasDIT}, + {Name: "i8mm", Feature: &ARM64.HasI8MM}, } } @@ -145,6 +147,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { ARM64.HasLRCPC = true } + switch extractBits(isar1, 52, 55) { + case 1: + ARM64.HasI8MM = true + } + // ID_AA64PFR0_EL1 switch extractBits(pfr0, 16, 19) { case 0: @@ -168,6 +175,11 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { parseARM64SVERegister(getzfr0()) } + + switch extractBits(pfr0, 48, 51) { + case 1: + ARM64.HasDIT = true + } } func parseARM64SVERegister(zfr0 uint64) { diff --git a/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go new file mode 100644 index 00000000..b838cb9e --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_darwin_x86.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin && amd64 && gc + +package cpu + +// darwinSupportsAVX512 checks Darwin kernel for AVX512 support via sysctl +// call (see issue 43089). It also restricts AVX512 support for Darwin to +// kernel version 21.3.0 (MacOS 12.2.0) or later (see issue 49233). +// +// Background: +// Darwin implements a special mechanism to economize on thread state when +// AVX512 specific registers are not in use. This scheme minimizes state when +// preempting threads that haven't yet used any AVX512 instructions, but adds +// special requirements to check for AVX512 hardware support at runtime (e.g. +// via sysctl call or commpage inspection). See issue 43089 and link below for +// full background: +// https://github.com/apple-oss-distributions/xnu/blob/xnu-11215.1.10/osfmk/i386/fpu.c#L214-L240 +// +// Additionally, all versions of the Darwin kernel from 19.6.0 through 21.2.0 +// (corresponding to MacOS 10.15.6 - 12.1) have a bug that can cause corruption +// of the AVX512 mask registers (K0-K7) upon signal return. For this reason +// AVX512 is considered unsafe to use on Darwin for kernel versions prior to +// 21.3.0, where a fix has been confirmed. See issue 49233 for full background. +func darwinSupportsAVX512() bool { + return darwinSysctlEnabled([]byte("hw.optional.avx512f\x00")) && darwinKernelVersionCheck(21, 3, 0) +} + +// Ensure Darwin kernel version is at least major.minor.patch, avoiding dependencies +func darwinKernelVersionCheck(major, minor, patch int) bool { + var release [256]byte + err := darwinOSRelease(&release) + if err != nil { + return false + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return false + } + case b == 0: + break Loop + default: + return false + } + } + if c != 2 { + return false + } + return mmp[0] > major || mmp[0] == major && (mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index 910728fb..32a44514 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -6,10 +6,10 @@ package cpu -// cpuid is implemented in cpu_x86.s for gc compiler +// cpuid is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) -// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler +// xgetbv with ecx = 0 is implemented in cpu_gc_x86.s for gc compiler // and in cpu_gccgo.c for gccgo. 
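(Editorial note: darwinKernelVersionCheck above scans the NUL-terminated uname release buffer into major/minor/patch and compares against 21.3.0, the first kernel with the confirmed AVX-512 mask-register fix. A self-contained sketch of the same comparison over a plain string; illustrative, not the vendored function.)

```go
package main

import "fmt"

// kernelAtLeast parses "major.minor.patch" and reports whether it is at
// least the requested version, mirroring the loop described above (the
// vendored code reads a NUL-terminated [256]byte buffer instead).
func kernelAtLeast(release string, major, minor, patch int) bool {
	var mmp [3]int
	c := 0
	for i := 0; i < len(release); i++ {
		switch b := release[i]; {
		case b >= '0' && b <= '9':
			mmp[c] = 10*mmp[c] + int(b-'0')
		case b == '.':
			c++
			if c > 2 {
				return false
			}
		default:
			return false
		}
	}
	if c != 2 {
		return false
	}
	return mmp[0] > major || mmp[0] == major &&
		(mmp[1] > minor || mmp[1] == minor && mmp[2] >= patch)
}

func main() {
	fmt.Println(kernelAtLeast("21.3.0", 21, 3, 0)) // true: fix confirmed (macOS 12.2)
	fmt.Println(kernelAtLeast("20.6.0", 21, 3, 0)) // false: affected by the K0-K7 bug
}
```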
func xgetbv() (eax, edx uint32) diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s similarity index 94% rename from vendor/golang.org/x/sys/cpu/cpu_x86.s rename to vendor/golang.org/x/sys/cpu/cpu_gc_x86.s index 7d7ba33e..ce208ce6 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.s +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.s @@ -18,7 +18,7 @@ TEXT ·cpuid(SB), NOSPLIT, $0-24 RET // func xgetbv() (eax, edx uint32) -TEXT ·xgetbv(SB),NOSPLIT,$0-8 +TEXT ·xgetbv(SB), NOSPLIT, $0-8 MOVL $0, CX XGETBV MOVL AX, eax+0(FP) diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index 99c60fe9..170d21dd 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -23,9 +23,3 @@ func xgetbv() (eax, edx uint32) { gccgoXgetbv(&a, &d) return a, d } - -// gccgo doesn't build on Darwin, per: -// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 -func darwinSupportsAVX512() bool { - return false -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index 3d386d0f..f1caf0f7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,8 +35,10 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + hwcap_DIT = 1 << 24 hwcap2_SVE2 = 1 << 1 + hwcap2_I8MM = 1 << 13 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -106,9 +108,11 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + ARM64.HasDIT = isSet(hwCap, hwcap_DIT) // HWCAP2 feature bits ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) + ARM64.HasI8MM = isSet(hwCap2, hwcap2_I8MM) } func isSet(hwc uint, value uint) bool { diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index cd63e733..7d902b68 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 00000000..cb4a0c57 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). 
+// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. +// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. 
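(Editorial note: the HWCAP fallback declared above and applied at the end of doinit below keys single-letter ISA extensions to bit positions by letter: the Compressed extension "C" lives at bit 'C'-'A' = 2. A worked example; the hwCap value is hypothetical.)

```go
package main

import "fmt"

func main() {
	const hwcapC = 1 << ('C' - 'A') // bit 2, i.e. 0x4
	hwCap := uint(0x112d)           // hypothetical AT_HWCAP auxv value
	fmt.Printf("mask %#x set: %v\n", hwcapC, hwCap&hwcapC != 0) // mask 0x4 set: true
}
```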
+ + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. + if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. +func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_x86.go b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go new file mode 100644 index 00000000..a0fd7e2f --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_other_x86.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build 386 || amd64p32 || (amd64 && (!darwin || !gc)) + +package cpu + +func darwinSupportsAVX512() bool { + panic("only implemented for gc && amd64 && darwin") +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index 7f0c79c0..aca3199c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -8,4 +8,13 @@ package cpu const cacheLineSize = 64 -func initOptions() {} +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index c29f5e4c..600a6807 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -92,10 +92,8 @@ func archInit() { osSupportsAVX = isSet(1, eax) && isSet(2, eax) if runtime.GOOS == "darwin" { - // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. - // Since users can't rely on mask register contents, let's not advertise AVX-512 support. - // See issue 49233. 
- osSupportsAVX512 = false + // Darwin requires special AVX512 checks, see cpu_darwin_x86.go + osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512() } else { // Check if OPMASK and ZMM registers have OS support. osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) diff --git a/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go new file mode 100644 index 00000000..4d0888b0 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_darwin_x86_gc.go @@ -0,0 +1,98 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Minimal copy of x/sys/unix so the cpu package can make a +// system call on Darwin without depending on x/sys/unix. + +//go:build darwin && amd64 && gc + +package cpu + +import ( + "syscall" + "unsafe" +) + +type _C_int int32 + +// adapted from unix.Uname() at x/sys/unix/syscall_darwin.go L419 +func darwinOSRelease(release *[256]byte) error { + // from x/sys/unix/zerrors_openbsd_amd64.go + const ( + CTL_KERN = 0x1 + KERN_OSRELEASE = 0x2 + ) + + mib := []_C_int{CTL_KERN, KERN_OSRELEASE} + n := unsafe.Sizeof(*release) + + return sysctl(mib, &release[0], &n, nil, 0) +} + +type Errno = syscall.Errno + +var _zero uintptr // Single-word zero for use when we need a valid pointer to 0 bytes. + +// from x/sys/unix/zsyscall_darwin_amd64.go L791-807 +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + if _, _, err := syscall_syscall6( + libc_sysctl_trampoline_addr, + uintptr(_p0), + uintptr(len(mib)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + ); err != 0 { + return err + } + + return nil +} + +var libc_sysctl_trampoline_addr uintptr + +// adapted from internal/cpu/cpu_arm64_darwin.go +func darwinSysctlEnabled(name []byte) bool { + out := int32(0) + nout := unsafe.Sizeof(out) + if ret := sysctlbyname(&name[0], (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); ret != nil { + return false + } + return out > 0 +} + +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +var libc_sysctlbyname_trampoline_addr uintptr + +// adapted from runtime/sys_darwin.go in the pattern of sysctl() above, as defined in x/sys/unix +func sysctlbyname(name *byte, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + if _, _, err := syscall_syscall6( + libc_sysctlbyname_trampoline_addr, + uintptr(unsafe.Pointer(name)), + uintptr(unsafe.Pointer(old)), + uintptr(unsafe.Pointer(oldlen)), + uintptr(unsafe.Pointer(new)), + uintptr(newlen), + 0, + ); err != 0 { + return err + } + + return nil +} + +//go:cgo_import_dynamic libc_sysctlbyname sysctlbyname "/usr/lib/libSystem.B.dylib" + +// Implemented in the runtime package (runtime/sys_darwin.go) +func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +//go:linkname syscall_syscall6 syscall.syscall6 diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e..6e08a76a 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. 
The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680ea..7ca4fa12 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. +func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. +func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. 
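(Editorial note: FdToClockID, defined earlier in this hunk, implements the kernel's FD_TO_CLOCKID macro, mapping a /dev/ptpN file descriptor to the negative dynamic clock ID that clock_gettime(2) expects. A quick worked example; the fd value is hypothetical.)

```go
package main

import "fmt"

// fdToClockID mirrors FD_TO_CLOCKID: ((~fd) << 3) | CLOCKFD, with CLOCKFD == 3.
func fdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) }

func main() {
	// For fd = 3: ^3 = -4, -4 << 3 = -32, -32 | 3 = -29.
	fmt.Println(fdToClockID(3)) // -29, a dynamic (negative) clockid_t
}
```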
+func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index fdcaa974..6ab02b6c 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,6 +58,7 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include +#include #include #include #include @@ -157,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . #if defined(__powerpc__) @@ -255,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -263,6 +275,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -525,6 +538,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || @@ -549,6 +563,8 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -652,7 +668,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -662,7 +678,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe52..3a5e776f 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func 
MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef..6f15ba1e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a89..099867de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } +//sys renamexNp(from string, to string, flag uint32) (err error) + +func RenamexNp(from string, to string, flag uint32) (err error) { + return renamexNp(from, to, flag) +} + +//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) + +func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + return renameatxNp(fromfd, from, tofd, to, flag) +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { @@ -542,6 +554,55 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f..a6a2d2fc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5682e262..230a9454 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. +// +// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctcp" +// algorithm. +// +// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr"
+// algorithm. +// +// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1818,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) //sys CloseRange(first uint, last uint, flags uint) (err error) @@ -1959,7 +2002,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) @@ -2592,3 +2654,4 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) +//sys Mseal(b []byte, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c7..745e5c7e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e9845..dd2262a4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a2889..8cf3670b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 6f5a2889..8cf3670b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error) {
 	}
 	return riscvHWProbe(pairs, setSize, set, flags)
 }
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index b25343c7..b86ded54 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -293,6 +293,7 @@ func Uname(uname *Utsname) error {
 //sys	Mkfifoat(dirfd int, path string, mode uint32) (err error)
 //sys	Mknod(path string, mode uint32, dev int) (err error)
 //sys	Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
+//sys	Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error)
 //sys	Nanosleep(time *Timespec, leftover *Timespec) (err error)
 //sys	Open(path string, mode int, perm uint32) (fd int, err error)
 //sys	Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index 77081de8..4e92e5aa 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) {
 	return mapper.Munmap(b)
 }
 
+func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
+	xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
+	return unsafe.Pointer(xaddr), err
+}
+
+func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
+	return mapper.munmap(uintptr(addr), length)
+}
+
 func Read(fd int, p []byte) (n int, err error) {
 	n, err = read(fd, p)
 	if raceenabled {
diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
index 312ae6ac..7bf5c04b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
@@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) {
 	return mapper.Munmap(b)
 }
 
+func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
+	xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
+	return unsafe.Pointer(xaddr), err
+}
+
+func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
+	return mapper.munmap(uintptr(addr), length)
+}
+
 //sys	Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
 //sysnb	Getgid() (gid int)
 //sysnb	Getpid() (pid int)
@@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) {
 // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/
 func isSpecialPath(path []byte) (v bool) {
 	var special = [4][8]byte{
-		[8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
-		[8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
+		{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
+		{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
 
 	var i, j int
 	for i = 0; i < len(special); i++ {
@@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) {
 //sys	Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT
 //sys	Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT
 //sys	Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT
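The MmapPtr/MunmapPtr pair added above exposes the package's existing mapper through raw pointers rather than []byte, for mappings that should not be aliased as a Go slice. A minimal sketch of an anonymous 4 KiB mapping, assuming Linux-style flag names and the usual golang.org/x/sys/unix import (error handling abbreviated):

	p, err := unix.MmapPtr(-1, 0, nil, 4096, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANON)
	if err == nil {
		*(*byte)(p) = 42 // write to the first byte of the mapping
		_ = unix.MunmapPtr(p, 4096)
	}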
+
+func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) {
+	runtime.EnterSyscall()
+	r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg)
+	runtime.ExitSyscall()
+	val = int(r0)
+	if int64(r0) == -1 {
+		err = errnoErr2(e1, e2)
+	}
+	return
+}
+
+func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) {
+	switch op.(type) {
+	case *Flock_t:
+		err = FcntlFlock(fd, cmd, op.(*Flock_t))
+		if err != nil {
+			ret = -1
+		}
+		return
+	case int:
+		return FcntlInt(fd, cmd, op.(int))
+	case *F_cnvrt:
+		return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt))))
+	case unsafe.Pointer:
+		return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer)))
+	default:
+		return -1, EINVAL
+	}
+	return
+}
+
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	return sendfile(outfd, infd, offset, count)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	// TODO: use LE call instead if the call is implemented
+	originalOffset, err := Seek(infd, 0, SEEK_CUR)
+	if err != nil {
+		return -1, err
+	}
+	// start reading data from in_fd
+	if offset != nil {
+		_, err := Seek(infd, *offset, SEEK_SET)
+		if err != nil {
+			return -1, err
+		}
+	}
+
+	buf := make([]byte, count)
+	readBuf := make([]byte, 0)
+	var n int = 0
+	for i := 0; i < count; i += n {
+		// assign with =, not :=, so the loop counter and the offset
+		// update below observe the bytes read in this iteration; cap
+		// the read at the bytes still owed so no more than count
+		// bytes are copied in total
+		n, err = Read(infd, buf[:count-i])
+		if n == 0 {
+			if err != nil {
+				return -1, err
+			} else { // EOF
+				break
+			}
+		}
+		// only the n bytes actually read belong to the payload
+		readBuf = append(readBuf, buf[:n]...)
+	}
+
+	n2, err := Write(outfd, readBuf)
+	if err != nil {
+		return -1, err
+	}
+
+	// When sendfile() returns, this variable will be set to the
+	// offset of the byte following the last byte that was read.
+	if offset != nil {
+		*offset = *offset + int64(len(readBuf))
+		// If offset is not NULL, then sendfile() does not modify the file
+		// offset of in_fd
+		_, err := Seek(infd, originalOffset, SEEK_SET)
+		if err != nil {
+			return -1, err
+		}
+	}
+	return n2, nil
+}
diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go
new file mode 100644
index 00000000..07ac8e09
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go
@@ -0,0 +1,13 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux && go1.24
+
+package unix
+
+import _ "unsafe"
+
+//go:linkname vgetrandom runtime.vgetrandom
+//go:noescape
+func vgetrandom(p []byte, flags uint32) (ret int, supported bool)
diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
new file mode 100644
index 00000000..297e97bc
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go
@@ -0,0 +1,11 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +//go:build !linux || !go1.24 + +package unix + +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e40fa852..d73c4652 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index bb02aa6c..4a55a400 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 93a38a97..6ebc48b3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -321,6 +321,9 @@ const ( AUDIT_INTEGRITY_STATUS = 0x70a AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 @@ -457,6 +460,7 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd + BCACHEFS_SUPER_MAGIC = 0xca451a4e BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -488,12 +492,14 @@ const ( BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -502,6 +508,7 @@ const ( BPF_IMM = 0x0 BPF_IND = 0x40 BPF_JA = 0x0 + BPF_JCOND = 0xe0 BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 @@ -657,6 +664,9 @@ const ( CAN_NPROTO = 0x8 CAN_RAW = 0x1 CAN_RAW_FILTER_MAX = 0x200 + CAN_RAW_XL_VCID_RX_FILTER = 
0x4 + CAN_RAW_XL_VCID_TX_PASS = 0x2 + CAN_RAW_XL_VCID_TX_SET = 0x1 CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff @@ -924,6 +934,7 @@ const ( EPOLL_CTL_ADD = 0x1 EPOLL_CTL_DEL = 0x2 EPOLL_CTL_MOD = 0x3 + EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 ESP_V4_FLOW = 0xa ESP_V6_FLOW = 0xc @@ -937,9 +948,6 @@ const ( ETHTOOL_FEC_OFF = 0x4 ETHTOOL_FEC_RS = 0x8 ETHTOOL_FLAG_ALL = 0x7 - ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 - ETHTOOL_FLAG_OMIT_REPLY = 0x2 - ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_FLASHDEV = 0x33 ETHTOOL_FLASH_MAX_FILENAME = 0x80 ETHTOOL_FWVERS_LEN = 0x20 @@ -1162,6 +1170,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1339,6 +1348,7 @@ const ( F_OFD_SETLK = 0x25 F_OFD_SETLKW = 0x26 F_OK = 0x0 + F_SEAL_EXEC = 0x20 F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 @@ -1627,6 +1637,7 @@ const ( IP_FREEBIND = 0xf IP_HDRINCL = 0x3 IP_IPSEC_POLICY = 0x10 + IP_LOCAL_PORT_RANGE = 0x33 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1653,6 +1664,7 @@ const ( IP_PMTUDISC_OMIT = 0x5 IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 + IP_PROTOCOL = 0x34 IP_RECVERR = 0xb IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 @@ -1698,6 +1710,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_CRASH_HOTPLUG_SUPPORT = 0x8 KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 @@ -1773,6 +1786,7 @@ const ( KEY_SPEC_USER_KEYRING = -0x4 KEY_SPEC_USER_SESSION_KEYRING = -0x5 LANDLOCK_ACCESS_FS_EXECUTE = 0x1 + LANDLOCK_ACCESS_FS_IOCTL_DEV = 0x8000 LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800 LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40 LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80 @@ -1790,6 +1804,8 @@ const ( LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1854,6 +1870,19 @@ const ( MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_16GB = 0x88000000 + MAP_HUGE_16KB = 0x38000000 + MAP_HUGE_16MB = 0x60000000 + MAP_HUGE_1GB = 0x78000000 + MAP_HUGE_1MB = 0x50000000 + MAP_HUGE_256MB = 0x70000000 + MAP_HUGE_2GB = 0x7c000000 + MAP_HUGE_2MB = 0x54000000 + MAP_HUGE_32MB = 0x64000000 + MAP_HUGE_512KB = 0x4c000000 + MAP_HUGE_512MB = 0x74000000 + MAP_HUGE_64KB = 0x40000000 + MAP_HUGE_8MB = 0x5c000000 MAP_HUGE_MASK = 0x3f MAP_HUGE_SHIFT = 0x1a MAP_PRIVATE = 0x2 @@ -1901,6 +1930,8 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2166,10 +2197,10 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 - NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_F_MASK = 0x7 NFT_TABLE_MAXNAMELEN = 0x100 NFT_TRACETYPE_MAX = 0x3 NFT_TUNNEL_F_MASK = 0x7 @@ -2335,9 +2366,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf 
PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2403,12 +2436,14 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2490,6 +2525,23 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PPC_DEXCR_CTRL_CLEAR = 0x4 + PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 + PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 + PR_PPC_DEXCR_CTRL_MASK = 0x1f + PR_PPC_DEXCR_CTRL_SET = 0x2 + PR_PPC_DEXCR_CTRL_SET_ONEXEC = 0x8 + PR_PPC_DEXCR_IBRTPD = 0x1 + PR_PPC_DEXCR_NPHIE = 0x3 + PR_PPC_DEXCR_SBHE = 0x0 + PR_PPC_DEXCR_SRAPD = 0x2 + PR_PPC_GET_DEXCR = 0x48 + PR_PPC_SET_DEXCR = 0x49 + PR_RISCV_CTX_SW_FENCEI_OFF = 0x1 + PR_RISCV_CTX_SW_FENCEI_ON = 0x0 + PR_RISCV_SCOPE_PER_PROCESS = 0x0 + PR_RISCV_SCOPE_PER_THREAD = 0x1 + PR_RISCV_SET_ICACHE_FLUSH_CTX = 0x47 PR_RISCV_V_GET_CONTROL = 0x46 PR_RISCV_V_SET_CONTROL = 0x45 PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 @@ -2581,6 +2633,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -2894,14 +2968,17 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 + RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x1f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 @@ -2918,7 +2995,9 @@ const ( SCHED_RESET_ON_FORK = 0x40000000 SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 + SCM_PIDFD = 0x4 SCM_RIGHTS = 0x1 + SCM_SECURITY = 0x3 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 SECCOMP_ADDFD_FLAG_SEND = 0x2 @@ -3051,6 +3130,8 @@ const ( SIOCSMIIREG = 0x8949 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SK_DIAG_BPF_STORAGE_MAX = 0x3 + SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1 SMACK_MAGIC = 0x43415d53 SMART_AUTOSAVE = 0xd2 SMART_AUTO_OFFLINE = 0xdb @@ -3071,6 +3152,8 @@ const ( SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 + SOCK_DESTROY = 0x15 + SOCK_DIAG_BY_FAMILY = 0x14 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -3164,6 +3247,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3177,8 +3261,10 @@ const ( STATX_MTIME = 0x40 STATX_NLINK = 0x4 STATX_SIZE = 0x200 + STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3260,6 +3346,7 @@ const ( TCP_MAX_WINSHIFT = 0xe TCP_MD5SIG = 0xe TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_IFINDEX = 0x2 TCP_MD5SIG_FLAG_PREFIX = 0x1 TCP_MD5SIG_MAXKEYLEN = 0x50 TCP_MSS = 0x200 @@ -3576,6 +3663,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 42ff8c3c..c0d45e32 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -118,6 +121,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 @@ -150,9 +154,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 @@ -275,6 +298,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -313,6 +338,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dca43600..c731d24f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -118,6 +121,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 
0x800 @@ -150,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -229,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 @@ -276,6 +299,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -314,6 +339,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 5cca668a..680018a4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 @@ -282,6 +304,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -320,6 +344,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + 
SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d8cae6d1..a63909f3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 ESR_MAGIC = 0x45535201 EXTPROC = 0x10000 @@ -87,6 +89,7 @@ const ( FICLONE = 0x40049409 FICLONERANGE = 0x4020940d FLUSHO = 0x1000 + FPMR_MAGIC = 0x46504d52 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 @@ -109,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -151,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -197,6 +206,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -232,6 +242,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f @@ -272,6 +296,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -310,6 +336,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 28e39afd..9b0a2573 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,6 +109,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -152,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + 
NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -231,6 +239,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 @@ -269,6 +291,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -307,6 +331,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index cd66e92c..958e6e06 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -313,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c1595eba..50c7f25b 
100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -313,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ee9456b0..ced21d66 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + 
PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -313,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8cfca81e..226c0441 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,6 +297,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -313,6 +337,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 60b0deb3..3122737c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -150,9 +153,14 @@ const ( NL3 = 
0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +238,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -330,6 +352,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -368,6 +392,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f90aa728..eb5d3467 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -150,9 +153,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +238,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -334,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -372,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + 
SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ba9e0150..e921ebc6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 @@ -150,9 +153,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +238,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -334,6 +356,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -372,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 07cdfd6e..38ba81c5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + 
PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 @@ -266,6 +288,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -304,6 +328,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 2f1dd214..71f04009 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -78,6 +78,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,6 +108,7 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -148,9 +151,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +235,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 @@ -338,6 +360,8 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a @@ -376,6 +400,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f40519d9..c44a3133 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -82,6 +82,8 @@ const ( EFD_CLOEXEC = 0x400000 EFD_NONBLOCK = 0x4000 EMT_TAGOVF = 0x1 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x400000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -110,6 +112,7 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 
HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 @@ -153,9 +156,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -232,6 +240,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 @@ -329,6 +351,8 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c @@ -415,6 +439,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab..1ec2b140 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f24..24b346e1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb284..ebd21310 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT 
libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997..824b9c2d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var 
libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1a..4f178a22 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 87d8612a..5cc1e8eb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { @@ -971,23 +981,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -2229,3 +2222,19 @@ func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mseal(b []byte, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSEAL, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9dc42410..1851df14 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 41b56173..0b43c693 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, 
$4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d3a0751..e1ec0dbe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 4019a656..880c6d6e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index c39f7776..7c8452a6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s 
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index ac4af24f..b8ef95b0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 57571d07..2ffdf861 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index f77d5321..2af3b5c7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index e62963e6..1da08d52 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 
uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index fae140b6..b7a25135 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 00831354..6e85b0aa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 9d1e0ff0..f15dadf0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -555,6 +555,12 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mount(SB) + RET +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_nanosleep(SB) RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 79029ed5..28b487df 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1493,6 +1493,30 @@ var 
libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index da115f9a..1e7f321e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 53aef5dc..524b0820 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -457,4 +457,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 71d52476..f485dbf4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 @@ -379,4 +380,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c7477061..70b35bf3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -421,4 +421,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index f96e214f..1893e2fe 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -324,4 +324,5 @@ const ( 
SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 28425346..16a4017d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 @@ -318,4 +320,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index d0953018..7e567f1e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 295c7f4b..38ae55e5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index d1a9eaca..55e92e60 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -371,4 +371,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index bec157c3..60658d6a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -441,4 +441,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 7ee7bdc4..e203e8a7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -448,4 +448,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index fad1f25b..5944b97d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 7d3e1635..c66d416d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -420,4 +420,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 0ed53ad9..a5459e76 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -325,4 +325,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 2fba04ad..01d86825 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -386,4 +386,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 621d00d7..7b703e77 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -399,4 +399,5 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f..17c53bd9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 @@ -449,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -467,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -499,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -544,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 
28ff4ef7..2392226a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 @@ -449,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -467,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -499,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -544,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a..51e13eb0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee..d002d8ef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee..3f863d89 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12ac..61c72931 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c79..b5d17414 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 0036746e..5537148d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,30 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -515,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -556,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -1723,12 +1752,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1767,6 +1790,7 @@ const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1796,6 +1820,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1828,6 +1854,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1896,6 +1923,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ 
-1948,6 +1976,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -2485,7 +2522,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -2557,8 +2594,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + SOF_TIMESTAMPING_LAST = 0x20000 + SOF_TIMESTAMPING_MASK = 0x3ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3473,7 +3510,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x6 ) type FsverityDigest struct { @@ -3504,7 +3541,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3765,7 +3802,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2d ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3805,12 +3842,15 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2e + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -3947,7 +3987,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3975,7 +4015,7 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x5 + ETHTOOL_A_TSINFO_MAX = 0x6 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -3991,11 +4031,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4078,6 +4118,107 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } +type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + 
HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Clockid int32 + Rsv [2]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 @@ -4259,6 +4400,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { @@ -4605,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x149 + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5209,7 +5351,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1f + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5703,7 +5845,7 @@ const ( NL80211_STA_FLAG_ASSOCIATED = 0x7 NL80211_STA_FLAG_AUTHENTICATED = 0x5 NL80211_STA_FLAG_AUTHORIZED = 0x1 - NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX = 0x8 NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 @@ -6001,3 +6143,34 @@ type CachestatRange struct { Off uint64 Len uint64 } + +const ( + SK_MEMINFO_RMEM_ALLOC = 0x0 + SK_MEMINFO_RCVBUF = 0x1 + SK_MEMINFO_WMEM_ALLOC = 0x2 + SK_MEMINFO_SNDBUF = 0x3 + SK_MEMINFO_FWD_ALLOC = 0x4 + SK_MEMINFO_WMEM_QUEUED = 0x5 + SK_MEMINFO_OPTMEM = 0x6 + SK_MEMINFO_BACKLOG = 0x7 + SK_MEMINFO_DROPS = 0x8 + SK_MEMINFO_VARS = 0x9 + SKNLGRP_NONE = 0x0 + SKNLGRP_INET_TCP_DESTROY = 0x1 + SKNLGRP_INET_UDP_DESTROY = 0x2 + SKNLGRP_INET6_TCP_DESTROY = 0x3 + SKNLGRP_INET6_UDP_DESTROY = 0x4 + SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0 + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1 + SK_DIAG_BPF_STORAGE_REP_NONE = 0x0 + SK_DIAG_BPF_STORAGE = 0x1 + SK_DIAG_BPF_STORAGE_NONE = 0x0 + SK_DIAG_BPF_STORAGE_PAD = 0x1 + SK_DIAG_BPF_STORAGE_MAP_ID = 0x2 + SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3 +) + +type SockDiagReq struct { + Family uint8 + Protocol uint8 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc041..ad05b51a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af4..2e5d5a44 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fb..4e613cf6 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. 
func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 26be94a8..b6e1ab76 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -68,6 +68,7 @@ type UserInfo10 struct { //sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo //sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation //sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree +//sys NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum const ( // do not reorder @@ -893,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1086,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1157,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce // Control returns the security descriptor control bits. 
func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6525c62f..4a325438 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -17,8 +17,10 @@ import ( "unsafe" ) -type Handle uintptr -type HWND uintptr +type ( + Handle uintptr + HWND uintptr +) const ( InvalidHandle = ^Handle(0) @@ -166,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -211,6 +215,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW +//sys UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout +//sys GetKeyboardLayout(tid uint32) (hkl Handle) = user32.GetKeyboardLayout +//sys ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx //sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow //sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW //sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx @@ -307,6 +315,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written 
*uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole @@ -715,20 +727,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -884,6 +888,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1368,9 +1377,11 @@ func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) } + func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) } + func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } @@ -1673,13 +1684,16 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. 
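One behavioral change worth calling out in the syscall_windows.go hunk above: the vendored x/sys update rewrites Ftruncate to issue a single SetFileInformationByHandle call with FileEndOfFileInfo instead of the previous Seek/SetEndOfFile/Seek sequence, so resizing a file no longer touches and restores the file offset. A minimal usage sketch follows; the file name and target size are illustrative assumptions, not taken from this diff, and the snippet is Windows-only since it imports golang.org/x/sys/windows.

package main

import (
	"os"

	"golang.org/x/sys/windows"
)

func main() {
	// Open (or create) a hypothetical file to resize.
	f, err := os.OpenFile("data.bin", os.O_RDWR|os.O_CREATE, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// With the updated vendor code this is a single
	// SetFileInformationByHandle(FileEndOfFileInfo) call under the hood;
	// the current file offset is left untouched.
	if err := windows.Ftruncate(windows.Handle(f.Fd()), 1<<20); err != nil {
		panic(err)
	}
}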
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index d8cb71db..9d138de5 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -176,6 +176,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. + PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 @@ -1060,6 +1061,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 @@ -2003,7 +2005,21 @@ const ( MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 ) -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 +// Flags for GetAdaptersAddresses, see +// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses. +const ( + GAA_FLAG_SKIP_UNICAST = 0x1 + GAA_FLAG_SKIP_ANYCAST = 0x2 + GAA_FLAG_SKIP_MULTICAST = 0x4 + GAA_FLAG_SKIP_DNS_SERVER = 0x8 + GAA_FLAG_INCLUDE_PREFIX = 0x10 + GAA_FLAG_SKIP_FRIENDLY_NAME = 0x20 + GAA_FLAG_INCLUDE_WINS_INFO = 0x40 + GAA_FLAG_INCLUDE_GATEWAYS = 0x80 + GAA_FLAG_INCLUDE_ALL_INTERFACES = 0x100 + GAA_FLAG_INCLUDE_ALL_COMPARTMENTS = 0x200 + GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400 +) const ( IF_TYPE_OTHER = 1 @@ -2017,6 +2033,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 @@ -2144,6 +2204,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. +const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. 
See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. +type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. +type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. @@ -3404,3 +3590,14 @@ type DCB struct { EvtChar byte wReserved1 uint16 } + +// Keyboard Layout Flags. 
+// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw +const ( + KLF_ACTIVATE = 0x00000001 + KLF_SUBSTITUTE_OK = 0x00000002 + KLF_REORDER = 0x00000008 + KLF_REPLACELANG = 0x00000010 + KLF_NOTELLSHELL = 0x00000080 + KLF_SETFORPROCESS = 0x00000100 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 5c6035dd..01c0716c 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -180,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -246,7 +252,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -272,8 +280,10 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -346,8 +356,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = 
modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -401,6 +413,7 @@ var ( procTransmitFile = modmswsock.NewProc("TransmitFile") procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserEnum = modnetapi32.NewProc("NetUserEnum") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") @@ -476,12 +489,16 @@ var ( procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") + procGetKeyboardLayout = moduser32.NewProc("GetKeyboardLayout") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") procIsWindow = moduser32.NewProc("IsWindow") procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procLoadKeyboardLayoutW = moduser32.NewProc("LoadKeyboardLayoutW") procMessageBoxW = moduser32.NewProc("MessageBoxW") + procToUnicodeEx = moduser32.NewProc("ToUnicodeEx") + procUnloadKeyboardLayout = moduser32.NewProc("UnloadKeyboardLayout") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") @@ -787,6 +804,14 @@ func FreeSid(sid *SID) (err error) { return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { + r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetLengthSid(sid *SID) (len uint32) { r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) len = uint32(r0) @@ -1588,6 +1613,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) if r0 != 0 { @@ -1620,6 +1653,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0) + if r0 != 0 
{ + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func AddDllDirectory(path *uint16) (cookie uintptr, err error) { r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) cookie = uintptr(r0) @@ -2148,6 +2221,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2156,6 +2238,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -2357,6 +2448,14 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er return } +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) if r1 == 0 { @@ -2373,6 +2472,14 @@ func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint3 return } +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := 
syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { var _p0 uint32 if wait { @@ -3024,6 +3131,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3040,6 +3155,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { @@ -3486,6 +3609,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete return } +func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) if r0 != 0 { @@ -4064,6 +4195,12 @@ func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) { return } +func GetKeyboardLayout(tid uint32) (hkl Handle) { + r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0) + hkl = Handle(r0) + return +} + func GetShellWindow() (shellWindow HWND) { r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0) shellWindow = HWND(r0) @@ -4097,6 +4234,15 @@ func IsWindowVisible(hwnd HWND) (isVisible bool) { return } +func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0) + hkl = Handle(r0) + if hkl == 0 { + err = errnoErr(e1) + } + return +} + func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) ret = int32(r0) @@ -4106,6 +4252,20 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i return } +func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) { + r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), 
uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0) + ret = int32(r0) + return +} + +func UnloadKeyboardLayout(hkl Handle) (err error) { + r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { var _p0 uint32 if inheritExisting { diff --git a/vendor/golang.org/x/text/LICENSE b/vendor/golang.org/x/text/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/text/LICENSE +++ b/vendor/golang.org/x/text/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index a45f112b..24bc98ac 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -84,7 +84,7 @@ type decoder struct { } // newError returns an error object with position info. -func (d decoder) newError(pos int, f string, x ...interface{}) error { +func (d decoder) newError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("(line %d:%d): ", line, column) return errors.New(head+f, x...) @@ -96,7 +96,7 @@ func (d decoder) unexpectedTokenError(tok text.Token) error { } // syntaxError returns a syntax error for given position. -func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { +func (d decoder) syntaxError(pos int, f string, x ...any) error { line, column := d.Position(pos) head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) return errors.New(head+f, x...) diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c..024ffebd 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. 
var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index ff6a3836..2c0693d7 100644 Binary files a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 87853e78..099b2bf4 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -601,7 +601,7 @@ func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { // newSyntaxError returns a syntax error with line and column information for // current position. -func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { +func (d *Decoder) newSyntaxError(f string, x ...any) error { e := errors.New(f, x...) line, column := d.Position(len(d.orig) - len(d.in)) return errors.New("syntax error (line %d:%d): %v", line, column, e) diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go index d9671982..c2d6bd52 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/errors.go +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -17,7 +17,7 @@ var Error = errors.New("protobuf error") // New formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func New(f string, x ...interface{}) error { +func New(f string, x ...any) error { return &prefixError{s: format(f, x...)} } @@ -43,7 +43,7 @@ func (e *prefixError) Unwrap() error { // Wrap returns an error that has a "proto" prefix, the formatted string described // by the format specifier and arguments, and a suffix of err. The error wraps err. 
-func Wrap(err error, f string, x ...interface{}) error { +func Wrap(err error, f string, x ...any) error { return &wrapError{ s: format(f, x...), err: err, @@ -67,7 +67,7 @@ func (e *wrapError) Is(target error) bool { return target == Error } -func format(f string, x ...interface{}) string { +func format(f string, x ...any) string { // avoid "proto: " prefix when chaining for i := 0; i < len(x); i++ { switch e := x[i].(type) { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index ece53bea..f3252985 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -32,6 +32,7 @@ const ( EditionProto2 Edition = 998 EditionProto3 Edition = 999 Edition2023 Edition = 1000 + Edition2024 Edition = 1001 EditionUnsupported Edition = 100000 ) @@ -77,28 +78,42 @@ type ( Locations SourceLocations } + // EditionFeatures is a frequently-instantiated struct, so please take care + // to minimize padding when adding new fields to this struct (add them in + // the right place/order). EditionFeatures struct { + // StripEnumPrefix determines if the plugin generates enum value + // constants as-is, with their prefix stripped, or both variants. + StripEnumPrefix int + // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsLegacyRequired is true if field_presence is LEGACY_REQUIRED // https://protobuf.dev/editions/features/#field_presence IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED // https://protobuf.dev/editions/features/#repeated_field_encoding IsPacked bool + // IsUTF8Validated is true if utf8_validation is VERIFY // https://protobuf.dev/editions/features/#utf8_validation IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED // https://protobuf.dev/editions/features/#message_encoding IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the // UnmarshalJSON([]byte) error method for enums.
GenerateLegacyUnmarshalJSON bool @@ -258,6 +273,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +367,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -383,6 +400,10 @@ func (fd *Field) Message() protoreflect.MessageDescriptor { } return fd.L1.Message } +func (fd *Field) IsMapEntry() bool { + parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor) + return ok && parent.IsMapEntry() +} func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} @@ -421,6 +442,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -461,6 +483,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 3bc3b1cd..d2f54949 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -534,7 +536,7 @@ func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor } var nameBuilderPool = sync.Pool{ - New: func() interface{} { return new(strs.Builder) }, + New: func() any { return new(strs.Builder) }, } func getBuilder() *strs.Builder { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 570181eb..67a51b32 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -45,6 +45,11 @@ func (file *File) resolveMessages() { case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ + if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) { + // A map field might inherit delimited encoding from a file-wide default feature. 
+ // But maps never actually use delimited encoding. (At least for now...) + fd.L1.Kind = protoreflect.MessageKind + } } // Default is resolved here since it depends on Enum being resolved. @@ -499,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356..7611796e 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -32,6 +32,10 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeVarint(b) b = b[m:] parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + case genid.GoFeatures_StripEnumPrefix_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.StripEnumPrefix = int(v) default: panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num)) } @@ -68,7 +72,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index f0e38c4e..ba83fea4 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -68,7 +68,7 @@ type Builder struct { // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of // dependencies themselves is unspecified. - GoTypes []interface{} + GoTypes []any // DependencyIndexes is an ordered list of indexes into GoTypes for the // dependencies of messages, extensions, or services.
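The unmarshalGoFeature hunk above decodes the new strip_enum_prefix feature straight from the wire with protowire, and unmarshalFeatureSet now keys the Go features on the FeatureSet extension number (1002, FeatureSet_Go_ext_number) rather than on a field number inside GoFeatures. A small self-contained sketch of that manual protowire decoding pattern (illustrative only; readBoolField is a hypothetical helper, not part of the patch):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// readBoolField scans a wire-format buffer for a varint field with the
// given number and decodes it as a bool, mirroring the hand-rolled
// parsing style used in editions.go.
func readBoolField(b []byte, want protowire.Number) (value, found bool) {
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		if n < 0 {
			return false, false
		}
		b = b[n:]
		if num == want && typ == protowire.VarintType {
			v, m := protowire.ConsumeVarint(b)
			if m < 0 {
				return false, false
			}
			return protowire.DecodeBool(v), true
		}
		// Not the field we want: skip its payload and keep scanning.
		m := protowire.ConsumeFieldValue(num, typ, b)
		if m < 0 {
			return false, false
		}
		b = b[m:]
	}
	return false, false
}

func main() {
	// Field 1 (legacy_unmarshal_json_enum) set to true: tag 0x08, value 0x01.
	buf := []byte{0x08, 0x01}
	v, ok := readBoolField(buf, 1)
	fmt.Println(v, ok) // true true
}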
@@ -268,7 +268,7 @@ func (x depIdxs) Get(i, j int32) int32 { type ( resolverByIndex struct { - goTypes []interface{} + goTypes []any depIdxs depIdxs fileRegistry } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 1447a119..f30ab6b5 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -860,11 +860,13 @@ const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumValueOptions_Features_field_name protoreflect.Name = "features" EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" + EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) @@ -873,6 +875,7 @@ const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 + EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd012..d9b9d916 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b..09792d96 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,42 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. 
const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 + GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix" + GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix" +) + +// Enum values for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3 +) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 ) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02f..bef5a25f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b8..9404270d 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index a371f98d..5d5771c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -22,13 +22,13 @@ type Export struct{} // NewError formats a string according to the format specifier and arguments and // returns an error that has a "proto" prefix. -func (Export) NewError(f string, x ...interface{}) error { +func (Export) NewError(f string, x ...any) error { return errors.New(f, x...) } // enum is any enum type generated by protoc-gen-go // and must be a named int32 type. -type enum = interface{} +type enum = any // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. @@ -81,7 +81,7 @@ func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNu // message is any message type generated by protoc-gen-go // and must be a pointer to a named struct type. 
-type message = interface{} +type message = any // legacyMessageWrapper wraps a v2 message as a v1 message. type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index bff041ed..f29e6a8f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -68,7 +68,7 @@ func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { } for _, x := range *ext { ei := getExtensionFieldInfo(x.Type()) - if ei.funcs.isInit == nil { + if ei.funcs.isInit == nil || x.isUnexpandedLazy() { continue } v := x.Value() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 2b8f122c..0d5b546e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -99,6 +98,28 @@ func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { return false } +// isUnexpandedLazy returns true if the ExtensionField is lazy and not +// yet expanded, which means it's present and already checked for +// initialized required fields. +func (f *ExtensionField) isUnexpandedLazy() bool { + return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} + +// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded. +// +// The returned buffer has to be kept over whatever operation we're planning, +// as re-retrieving it will fail after the message is lazily decoded. +func (f *ExtensionField) lazyBuffer() []byte { + // This function might be in the critical path, so check the atomic without + // taking a lock first, then only take the lock if needed. + if !f.isUnexpandedLazy() { + return nil + } + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + return f.lazy.b +} + func (f *ExtensionField) lazyInit() { f.lazy.mu.Lock() defer f.lazy.mu.Unlock() @@ -136,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -152,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently.
func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e4..7c1f66c8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb7..78be9df3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go index b7a23faf..7a16ec13 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -26,6 +26,15 @@ func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) } num, _ := protowire.DecodeTag(xi.wiretag) size += messageset.SizeField(num) + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + // Don't count the tag size in the extension buffer, it's already added. + size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize + continue + } + } size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) } @@ -85,6 +94,19 @@ func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts ma xi := getExtensionFieldInfo(x.Type()) num, _ := protowire.DecodeTag(xi.wiretag) b = messageset.AppendFieldStart(b, num) + + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + // The tag inside the lazy buffer is a different tag (the extension + // number), but what we need here is the tag for FieldMessage: + b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType)) + b = append(b, lb[xi.tagsize:]...) + b = messageset.AppendFieldEnd(b) + return b, nil + } + } + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577b..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e2..077712c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 185ef2ef..f72ddd88 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -14,7 +14,7 @@ import ( // unwrapper unwraps the value to the underlying value. // This is implemented by List and Map. type unwrapper interface { - protoUnwrap() interface{} + protoUnwrap() any } // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index f8913651..18cb96fd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -136,6 +136,6 @@ func (ls *listReflect) NewElement() protoreflect.Value { func (ls *listReflect) IsValid() bool { return !ls.v.IsNil() } -func (ls *listReflect) protoUnwrap() interface{} { +func (ls *listReflect) protoUnwrap() any { return ls.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index f30b0a05..304244a6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -116,6 +116,6 @@ func (ms *mapReflect) NewValue() protoreflect.Value { func (ms *mapReflect) IsValid() bool { return !ms.v.IsNil() } -func (ms *mapReflect) protoUnwrap() interface{} { +func (ms *mapReflect) protoUnwrap() any { return ms.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 845c67d6..6254f5de 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -49,8 +49,11 @@ func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { return 0 } if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { - if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { - return int(size) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 { + return int(size - 1) } } return mi.sizePointerSlow(p, opts) @@ -60,7 +63,7 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int if flags.ProtoLegacy && mi.isMessageSet { size = sizeMessageSet(mi, p, opts) if mi.sizecacheOffset.IsValid() { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } return size } @@ -84,13 +87,16 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int } } if mi.sizecacheOffset.IsValid() { - if size > math.MaxInt32 { + if size > (math.MaxInt32 - 1) { // The size is too large for the int32 sizecache field. // We will need to recompute the size when encoding; // unfortunately expensive, but better than invalid output. - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0) } else { - atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + // The size cache contains the size + 1, to allow the + // zero value to be invalid, while also allowing for a + // 0 size to be cached. 
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1)) } } return size @@ -149,6 +155,14 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, nil } +// fullyLazyExtensions returns true if we should attempt to keep extensions lazy over size and marshal. +func fullyLazyExtensions(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // extensions to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 @@ -158,6 +172,14 @@ func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marsha if xi.funcs.size == nil { continue } + if fullyLazyExtensions(opts) { + // Don't expand the extension, instead use the buffer to calculate size + if lb := x.lazyBuffer(); lb != nil { + // We got hold of the buffer, so it's still lazy. + n += len(lb) + continue + } + } n += xi.funcs.size(x.Value(), xi.tagsize, opts) } return n @@ -176,6 +198,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, var err error for _, x := range *ext { xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) } return b, err @@ -191,6 +220,13 @@ func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, for _, k := range keys { x := (*ext)[int32(k)] xi := getExtensionFieldInfo(x.Type()) + if fullyLazyExtensions(opts) { + // Don't expand the extension if it's still in wire format, instead use the buffer content. + if lb := x.lazyBuffer(); lb != nil { + b = append(b, lb...) + continue + } + } b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) if err != nil { return b, err diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 00000000..9f6c32a7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. 
+func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. + emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have, need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Cant be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. 
+func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalImpl instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalImpl will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. +func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index cb25b0ba..e31249f6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -53,7 +53,7 @@ type ExtensionInfo struct { // type returned by InterfaceOf may not be identical. // // Deprecated: Use InterfaceOf(xt.Zero()) instead. - ExtensionType interface{} + ExtensionType any // Field is the field number of the extension. 
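equalUnknown above buckets the raw unknown-field bytes of each message by field number and then compares the buckets. A minimal standalone sketch of that bucketing step, using only the public protowire API (groupByNumber is an illustrative name, not upstream code):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    // groupByNumber mirrors the map-building loop in equalUnknown: each
    // top-level field is consumed and appended to its field number's bucket.
    func groupByNumber(raw []byte) map[protowire.Number][]byte {
        m := make(map[protowire.Number][]byte)
        for len(raw) > 0 {
            num, _, n := protowire.ConsumeField(raw)
            if n < 0 {
                break // malformed input; the real code assumes valid wire data
            }
            m[num] = append(m[num], raw[:n]...)
            raw = raw[n:]
        }
        return m
    }

    func main() {
        b := protowire.AppendTag(nil, 1, protowire.VarintType)
        b = protowire.AppendVarint(b, 7)
        b = protowire.AppendTag(b, 2, protowire.VarintType)
        b = protowire.AppendVarint(b, 9)
        fmt.Println(groupByNumber(b)) // map[1:[8 7] 2:[16 9]]
    }

Two buffers then compare equal when every bucket matches byte for byte, which makes the check insensitive to interleaving across field numbers but still sensitive to ordering within one number.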
// @@ -95,16 +95,16 @@ func (xi *ExtensionInfo) New() protoreflect.Value { func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { +func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any { return xi.lazyInit().GoValueOf(v).Interface() } func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } -func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { +func (xi *ExtensionInfo) IsValidInterface(v any) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index c1c33d00..81b2b1a7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -97,7 +97,7 @@ func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } -func (e *legacyEnumWrapper) protoUnwrap() interface{} { +func (e *legacyEnumWrapper) protoUnwrap() any { v := reflect.New(e.goTyp).Elem() v.SetInt(int64(e.num)) return v.Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee..b6849d66 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 950e9a1f..bf0b6049 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -216,7 +216,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { for _, v := range vs { oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } @@ -567,6 +567,6 @@ func (m aberrantMessage) IsValid() bool { func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } -func (m aberrantMessage) protoUnwrap() interface{} { +func (m aberrantMessage) protoUnwrap() any { return m.v.Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 629bacdc..741b5ed2 100644 --- 
a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,12 +30,12 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. - OneofWrappers []interface{} + OneofWrappers []any initMu sync.Mutex // protects all unexported fields initDone uint32 @@ -47,7 +47,7 @@ type MessageInfo struct { // exporter is a function that returns a reference to the ith field of v, // where v is a pointer to a struct. It returns nil if it does not support // exporting the requested field (e.g., already exported). -type exporter func(v interface{}, i int) interface{} +type exporter func(v any, i int) any // getMessageInfo returns the MessageInfo for any message type that // is generated by our implementation of protoc-gen-go (for v2 and on). @@ -201,7 +201,7 @@ fieldLoop: } for _, fn := range methods { for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { + if vs, ok := v.Interface().([]any); ok { oneofWrappers = vs } } @@ -256,7 +256,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { type mapEntryType struct { desc protoreflect.MessageDescriptor - valType interface{} // zero value of enum or message type + valType any // zero value of enum or message type } func (mt mapEntryType) New() protoreflect.Message { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index a6f0dbda..ecb4623d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -20,7 +20,7 @@ type reflectMessageInfo struct { // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[protoreflect.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]any // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -28,7 +28,7 @@ type reflectMessageInfo struct { denseFields []*fieldInfo // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. - rangeInfos []interface{} // either *fieldInfo or *oneofInfo + rangeInfos []any // either *fieldInfo or *oneofInfo getUnknown func(pointer) protoreflect.RawFields setUnknown func(pointer, protoreflect.RawFields) @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]any) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -255,6 +255,10 @@ func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) { if !ok { return false } + if x.isUnexpandedLazy() { + // Avoid calling x.Value(), which triggers a lazy unmarshal. 
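(Annotation, not part of the upstream hunk: an extension still held in undecoded wire form is populated by construction, so Has can answer true without paying for the unmarshal; the switch below only runs for values that have already been expanded.)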
+ return true + } switch { case xd.IsList(): return x.Value().List().Len() > 0 @@ -389,7 +393,7 @@ var ( // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { +func (mi *MessageInfo) MessageOf(m any) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -417,7 +421,7 @@ func (m *messageIfaceWrapper) Reset() { func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } -func (m *messageIfaceWrapper) protoUnwrap() interface{} { +func (m *messageIfaceWrapper) protoUnwrap() any { return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go index 29ba6bd3..99dc23c6 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -23,7 +23,7 @@ func (m *messageState) New() protoreflect.Message { func (m *messageState) Interface() protoreflect.ProtoMessage { return m.protoUnwrap().(protoreflect.ProtoMessage) } -func (m *messageState) protoUnwrap() interface{} { +func (m *messageState) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageState) ProtoMethods() *protoiface.Methods { @@ -154,7 +154,7 @@ func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { } return (*messageIfaceWrapper)(m) } -func (m *messageReflectWrapper) protoUnwrap() interface{} { +func (m *messageReflectWrapper) protoUnwrap() any { return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) } func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index 517e9443..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer interface{} - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. 
-var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. -func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return 
p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() *map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. - in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 4b020e31..79e18666 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( @@ -50,7 +47,7 @@ func pointerOfValue(v reflect.Value) pointer { } // pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v interface{}) pointer { +func pointerOfIface(v any) pointer { type ifaceHeader struct { Type unsafe.Pointer Data unsafe.Pointer @@ -80,7 +77,7 @@ func (p pointer) AsValueOf(t reflect.Type) reflect.Value { // AsIfaceOf treats p as a pointer to an object of type t and returns the value. 
// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) interface{} { +func (p pointer) AsIfaceOf(t reflect.Type) any { // TODO: Use tricky unsafe magic to directly create ifaceHeader. return p.AsValueOf(t).Interface() } diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index 1665a68e..a1f09162 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -18,7 +18,7 @@ type messageField struct { } var messageFieldPool = sync.Pool{ - New: func() interface{} { return new([]messageField) }, + New: func() any { return new([]messageField) }, } type ( @@ -69,7 +69,7 @@ type mapEntry struct { } var mapEntryPool = sync.Pool{ - New: func() interface{} { return new([]mapEntry) }, + New: func() any { return new([]mapEntry) }, } type ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f333..00000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd0..832a7988 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2b..1ffddf68 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index a3cba508..62a52a40 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. 
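The build-constraint hunks above are part of dropping the purego/appengine build mode in this release: pointer_reflect.go and strings_pure.go are deleted outright, and the surviving files now key only on the Go version. Schematically:

    //go:build !purego && !appengine && go1.21    (before)
    //go:build go1.21                             (after)

With the fallbacks gone, the unsafe-based implementations are always selected; the version hunk that follows records the bump to v1.35.2.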
const ( Major = 1 - Minor = 34 - Patch = 1 + Minor = 35 + Patch = 2 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b0..c36d4a9c 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index c9c8721a..78445d11 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,7 +39,49 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. // It panics if xt does not extend m. -func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. +func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. 
if m == nil { return xt.InterfaceOf(xt.Zero()) @@ -51,7 +93,36 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. -func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. +func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) @@ -78,7 +149,7 @@ func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { // It returns immediately if f returns false. // While iterating, mutating operations may only be performed // on the current extension field. -func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) { // Treat nil message interface as an empty message; nothing to range over. 
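Taken together, the new doc comments pin down the dynamic types flowing through this any-typed API. A usage sketch in the same spirit, where foopb.E_MyExtension and foopb.MyMessage are the hypothetical generated identifiers the comments themselves use:

    // foopb is hypothetical, mirroring the doc table above.
    proto.SetExtension(m, foopb.E_MyExtension, &foopb.MyMessage{})
    if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
        _ = mm // non-nil exactly when proto.HasExtension(m, foopb.E_MyExtension)
    }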
if m == nil { return diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6e..742cb518 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 00102d31..ea154eec 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -485,6 +485,8 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "debug_redact", nil) + case 4: + b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 5b80afe5..cd8fadba 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -510,7 +510,7 @@ type ExtensionType interface { // // ValueOf is more extensive than protoreflect.ValueOf for a given field's // value as it has more type information available. - ValueOf(interface{}) Value + ValueOf(any) Value // InterfaceOf completely unwraps the Value to the underlying Go type. // InterfaceOf panics if the input is nil or does not represent the @@ -519,13 +519,13 @@ type ExtensionType interface { // // InterfaceOf is able to unwrap the Value further than Value.Interface // as it has more type information available. - InterfaceOf(Value) interface{} + InterfaceOf(Value) any // IsValidValue reports whether the Value is valid to assign to the field. IsValidValue(Value) bool // IsValidInterface reports whether the input is valid to assign to the field. - IsValidInterface(interface{}) bool + IsValidInterface(any) bool } // EnumDescriptor describes an enum and diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 7ced876f..00000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface interface{} // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v interface{}) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() interface{} { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 16030973..9fe83cef 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -69,8 +69,8 @@ import ( // composite Value. Modifying an empty, read-only value panics. type Value value -// The protoreflect API uses a custom Value union type instead of interface{} -// to keep the future open for performance optimizations. Using an interface{} +// The protoreflect API uses a custom Value union type instead of any +// to keep the future open for performance optimizations. Using an any // always incurs an allocation for primitives (e.g., int64) since it needs to // be boxed on the heap (as interfaces can only contain pointers natively). // Instead, we represent the Value union as a flat struct that internally keeps @@ -85,7 +85,7 @@ type Value value // ValueOf returns a Value initialized with the concrete value stored in v. // This panics if the type does not match one of the allowed types in the // Value union. -func ValueOf(v interface{}) Value { +func ValueOf(v any) Value { switch v := v.(type) { case nil: return Value{} @@ -192,10 +192,10 @@ func (v Value) IsValid() bool { return v.typ != nilType } -// Interface returns v as an interface{}. +// Interface returns v as an any. // // Invariant: v == ValueOf(v).Interface() -func (v Value) Interface() interface{} { +func (v Value) Interface() any { switch v.typ { case nilType: return nil @@ -406,8 +406,8 @@ func (k MapKey) IsValid() bool { return Value(k).IsValid() } -// Interface returns k as an interface{}. -func (k MapKey) Interface() interface{} { +// Interface returns k as an any. +func (k MapKey) Interface() any { return Value(k).Interface() } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index b1fdbe3e..0015fcb3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect @@ -45,7 +44,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -80,7 +79,7 @@ func valueOfBytes(v []byte) Value { p := (*sliceHeader)(unsafe.Pointer(&v)) return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -93,7 +92,7 @@ func (v Value) getBytes() (x []byte) { *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} return x } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index 43547011..479527b5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect @@ -15,7 +14,7 @@ import ( type ( ifaceHeader struct { - _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. Type unsafe.Pointer Data unsafe.Pointer } @@ -37,7 +36,7 @@ var ( // typeOf returns a pointer to the Go type information. // The pointer is comparable and equal if and only if the types are identical. -func typeOf(t interface{}) unsafe.Pointer { +func typeOf(t any) unsafe.Pointer { return (*ifaceHeader)(unsafe.Pointer(&t)).Type } @@ -70,7 +69,7 @@ func valueOfString(v string) Value { func valueOfBytes(v []byte) Value { return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} } -func valueOfIface(v interface{}) Value { +func valueOfIface(v any) Value { p := (*ifaceHeader)(unsafe.Pointer(&v)) return Value{typ: p.Type, ptr: p.Data} } @@ -81,7 +80,7 @@ func (v Value) getString() string { func (v Value) getBytes() []byte { return unsafe.Slice((*byte)(v.ptr), v.num) } -func (v Value) getIface() (x interface{}) { +func (v Value) getIface() (x any) { *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} return x } diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 6267dc52..de177733 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -95,7 +95,7 @@ type Files struct { // multiple files. Only top-level declarations are registered. // Note that enum values are in the top-level since that are in the same // scope as the parent enum. 
- descsByName map[protoreflect.FullName]interface{} + descsByName map[protoreflect.FullName]any filesByPath map[string][]protoreflect.FileDescriptor numFiles int } @@ -117,7 +117,7 @@ func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { defer globalMutex.Unlock() } if r.descsByName == nil { - r.descsByName = map[protoreflect.FullName]interface{}{ + r.descsByName = map[protoreflect.FullName]any{ "": &packageDescriptor{}, } r.filesByPath = make(map[string][]protoreflect.FileDescriptor) @@ -485,7 +485,7 @@ type Types struct { } type ( - typesByName map[protoreflect.FullName]interface{} + typesByName map[protoreflect.FullName]any extensionsByMessage map[protoreflect.FullName]extensionsByNumber extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType ) @@ -570,7 +570,7 @@ func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { return nil } -func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error { name := desc.FullName() prev := r.typesByName[name] if prev != nil { @@ -841,7 +841,7 @@ func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(p } } -func typeName(t interface{}) string { +func typeName(t any) string { switch t.(type) { case protoreflect.EnumType: return "enum" @@ -854,7 +854,7 @@ func typeName(t interface{}) string { } } -func amendErrorWithCaller(err error, prev, curr interface{}) error { +func amendErrorWithCaller(err error, prev, curr any) error { prevPkg := goPackage(prev) currPkg := goPackage(curr) if prevPkg == "" || currPkg == "" || prevPkg == currPkg { @@ -863,7 +863,7 @@ func amendErrorWithCaller(err error, prev, curr interface{}) error { return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) } -func goPackage(v interface{}) string { +func goPackage(v any) string { switch d := v.(type) { case protoreflect.EnumType: v = d.Descriptor() diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d..24615656 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -166,3 +169,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. 
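With the Equal hook and its EqualInput/EqualOutput types added to protoiface here, the fast-path dispatch added to proto.Equal earlier in this diff is exercised by ordinary generated messages. A small self-contained check (timestamppb is vendored by this same change):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        a := &timestamppb.Timestamp{Seconds: 1, Nanos: 2}
        b := &timestamppb.Timestamp{Seconds: 1, Nanos: 2}
        // Both messages expose Methods.Equal under protobuf v1.35.x, so
        // proto.Equal can take the fast path instead of reflective comparison.
        fmt.Println(proto.Equal(a, b)) // true
    }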
+type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 81511a33..0d20722d 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -254,11 +254,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -332,7 +330,7 @@ func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { } var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ +var file_google_protobuf_timestamp_proto_goTypes = []any{ (*Timestamp)(nil), // 0: google.protobuf.Timestamp } var file_google_protobuf_timestamp_proto_depIdxs = []int32{ @@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/modules.txt b/vendor/modules.txt index 1f69c824..9ec263bb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -38,10 +38,16 @@ github.com/cloudwego/base64x ## explicit; go 1.16 github.com/cloudwego/iasm/expr github.com/cloudwego/iasm/x86_64 -# github.com/confluentinc/confluent-kafka-go v1.9.2 -## explicit; go 1.13 -github.com/confluentinc/confluent-kafka-go/kafka -github.com/confluentinc/confluent-kafka-go/kafka/librdkafka_vendor +# github.com/confluentinc/confluent-kafka-go/v2 v2.6.1 +## explicit; go 1.21 +github.com/confluentinc/confluent-kafka-go/v2/kafka +github.com/confluentinc/confluent-kafka-go/v2/kafka/librdkafka_vendor +github.com/confluentinc/confluent-kafka-go/v2/schemaregistry +github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/cache +github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/internal +github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/rest +github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde +github.com/confluentinc/confluent-kafka-go/v2/schemaregistry/serde/avrov2 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew @@ -93,12 +99,31 @@ github.com/gogo/protobuf/protoc-gen-gogo/descriptor # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy +# github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc +## explicit; 
go 1.21 +github.com/grafana/regexp +github.com/grafana/regexp/syntax +# github.com/hamba/avro/v2 v2.24.0 +## explicit; go 1.20 +github.com/hamba/avro/v2 +github.com/hamba/avro/v2/pkg/crc64 # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go +# github.com/klauspost/compress v1.17.10 +## explicit; go 1.21 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/klauspost/cpuid/v2 v2.2.7 ## explicit; go 1.15 github.com/klauspost/cpuid/v2 +# github.com/kr/text v0.2.0 +## explicit # github.com/leodido/go-urn v1.4.0 ## explicit; go 1.18 github.com/leodido/go-urn @@ -109,12 +134,20 @@ github.com/linkedin/goavro # github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty +# github.com/mitchellh/mapstructure v1.5.0 +## explicit; go 1.14 +github.com/mitchellh/mapstructure +# github.com/moby/sys/userns v0.1.0 +## explicit; go 1.21 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd ## explicit github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.2 ## explicit; go 1.12 github.com/modern-go/reflect2 +# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 +## explicit +github.com/munnerz/goautoneg # github.com/pelletier/go-toml/v2 v2.2.2 ## explicit; go 1.16 github.com/pelletier/go-toml/v2 @@ -125,33 +158,38 @@ github.com/pelletier/go-toml/v2/unstable # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.19.1 +# github.com/prometheus/client_golang v1.20.5 ## explicit; go 1.20 +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil +github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.53.0 -## explicit; go 1.20 +# github.com/prometheus/common v0.61.0 +## explicit; go 1.21 github.com/prometheus/common/expfmt -github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.12.0 -## explicit; go 1.19 +# github.com/prometheus/procfs v0.15.1 +## explicit; go 1.20 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.52.1 -## explicit; go 1.21 +# github.com/prometheus/prometheus v0.300.1 +## explicit; go 1.22.0 +github.com/prometheus/prometheus/model/exemplar +github.com/prometheus/prometheus/model/histogram +github.com/prometheus/prometheus/model/labels github.com/prometheus/prometheus/prompb # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/stretchr/testify v1.9.0 +# github.com/stretchr/testify v1.10.0 ## explicit; go 1.17 github.com/stretchr/testify/assert +github.com/stretchr/testify/assert/yaml # github.com/twitchyliquid64/golang-asm v0.15.1 ## explicit; go 1.13 github.com/twitchyliquid64/golang-asm/asm/arch @@ -177,10 +215,10 @@ github.com/ugorji/go/codec # golang.org/x/arch v0.8.0 ## explicit; go 1.18 
golang.org/x/arch/x86/x86asm -# golang.org/x/crypto v0.23.0 -## explicit; go 1.18 +# golang.org/x/crypto v0.30.0 +## explicit; go 1.20 golang.org/x/crypto/sha3 -# golang.org/x/net v0.25.0 +# golang.org/x/net v0.32.0 ## explicit; go 1.18 golang.org/x/net/html golang.org/x/net/html/atom @@ -189,12 +227,12 @@ golang.org/x/net/http2 golang.org/x/net/http2/h2c golang.org/x/net/http2/hpack golang.org/x/net/idna -# golang.org/x/sys v0.20.0 +# golang.org/x/sys v0.28.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.15.0 +# golang.org/x/text v0.21.0 ## explicit; go 1.18 golang.org/x/text/internal/language golang.org/x/text/internal/language/compact @@ -204,8 +242,8 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# google.golang.org/protobuf v1.34.1 -## explicit; go 1.17 +# google.golang.org/protobuf v1.35.2 +## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire